2024-12-09 03:23:00,412 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 03:23:00,422 main DEBUG Took 0.008658 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 03:23:00,422 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 03:23:00,423 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 03:23:00,424 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 03:23:00,425 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,432 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 03:23:00,443 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,444 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,445 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,445 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,446 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,446 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,447 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,447 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,448 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,448 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,449 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,449 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 03:23:00,449 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,450 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,450 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,450 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,451 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,451 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,451 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,451 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,452 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,452 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 03:23:00,452 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,453 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 03:23:00,455 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 03:23:00,456 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 03:23:00,458 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 03:23:00,458 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 03:23:00,459 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 03:23:00,459 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 03:23:00,466 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 03:23:00,468 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 03:23:00,470 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 03:23:00,470 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 03:23:00,470 main DEBUG createAppenders(={Console}) 2024-12-09 03:23:00,471 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-09 03:23:00,471 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 03:23:00,471 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-09 03:23:00,472 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 03:23:00,472 main DEBUG OutputStream closed 2024-12-09 03:23:00,472 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 03:23:00,473 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 03:23:00,473 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-09 03:23:00,539 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 03:23:00,542 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 03:23:00,543 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 03:23:00,544 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 03:23:00,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 03:23:00,545 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 03:23:00,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 03:23:00,546 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 03:23:00,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 03:23:00,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 03:23:00,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 03:23:00,548 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 03:23:00,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 03:23:00,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 03:23:00,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 03:23:00,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 03:23:00,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 03:23:00,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 03:23:00,552 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 03:23:00,553 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-09 03:23:00,553 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 03:23:00,554 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-09T03:23:00,809 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1 2024-12-09 03:23:00,812 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 03:23:00,813 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
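[Editor's sketch] The DEBUG lines above trace Log4j 2 building the test logging configuration from the log4j2.properties bundled in hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar: one appender (the custom HBaseTestAppender writing to SYSTEM_ERR) with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, a root logger at "INFO,Console", and per-package level overrides (for example org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, org.apache.zookeeper at ERROR). A minimal programmatic sketch of an equivalent setup, using the stock Log4j 2 ConfigurationBuilder API with the standard Console appender as a stand-in for HBase's HBaseTestAppender (class and method usage below is illustrative, not the actual test harness code):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;
    import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
    import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
    import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

    public final class TestLoggingConfigSketch {
        public static void configure() {
            ConfigurationBuilder<BuiltConfiguration> builder =
                ConfigurationBuilderFactory.newConfigurationBuilder();

            // Console appender with the same layout pattern reported above;
            // the real harness uses HBaseTestAppender targeting SYSTEM_ERR instead.
            AppenderComponentBuilder console = builder.newAppender("Console", "Console")
                .addAttribute("target", "SYSTEM_ERR")
                .add(builder.newLayout("PatternLayout")
                    .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
            builder.add(console);

            // A few of the per-package level overrides listed in the createLoggers(...) line above.
            builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
            builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
            builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

            // Root logger at INFO routed to the Console appender (the "INFO,Console" seen above).
            builder.add(builder.newRootLogger(Level.INFO)
                .add(builder.newAppenderRef("Console")));

            Configurator.initialize(builder.build());
        }
    }
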
2024-12-09T03:23:00,824 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-09T03:23:00,860 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=241, ProcessCount=11, AvailableMemoryMB=6092 2024-12-09T03:23:00,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:23:00,883 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85, deleteOnExit=true 2024-12-09T03:23:00,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:23:00,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/test.cache.data in system properties and HBase conf 2024-12-09T03:23:00,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:23:00,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:23:00,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:23:00,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:23:00,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:23:00,972 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T03:23:01,054 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:23:01,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:23:01,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:23:01,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:23:01,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:23:01,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:23:01,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:23:01,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:23:01,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:23:01,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:23:01,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:23:01,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:23:01,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:23:01,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:23:01,062 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:23:01,527 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:23:02,427 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T03:23:02,501 INFO [Time-limited test {}] log.Log(170): Logging initialized @2750ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T03:23:02,579 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:23:02,636 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:23:02,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:23:02,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:23:02,662 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:23:02,672 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:23:02,674 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:23:02,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:23:02,889 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/java.io.tmpdir/jetty-localhost-41781-hadoop-hdfs-3_4_1-tests_jar-_-any-159742628451958089/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:23:02,898 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:41781} 2024-12-09T03:23:02,899 INFO [Time-limited test {}] server.Server(415): Started @3149ms 2024-12-09T03:23:02,928 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:23:03,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:23:03,569 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:23:03,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:23:03,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:23:03,572 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:23:03,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:23:03,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:23:03,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f93babe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/java.io.tmpdir/jetty-localhost-39127-hadoop-hdfs-3_4_1-tests_jar-_-any-9776994414737260516/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:23:03,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:39127} 2024-12-09T03:23:03,702 INFO [Time-limited test {}] server.Server(415): Started @3952ms 2024-12-09T03:23:03,783 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:23:03,917 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:23:03,926 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:23:03,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:23:03,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:23:03,930 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:23:03,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:23:03,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:23:04,072 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c963ecd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/java.io.tmpdir/jetty-localhost-41867-hadoop-hdfs-3_4_1-tests_jar-_-any-10730259913563858572/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:23:04,072 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:41867} 2024-12-09T03:23:04,073 INFO [Time-limited test {}] server.Server(415): Started @4323ms 2024-12-09T03:23:04,075 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
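[Editor's sketch] By this point the harness has brought up the embedded HDFS side of the mini cluster requested on the "Starting up minicluster" line above (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). A minimal sketch of the JUnit scaffolding that produces this kind of startup, assuming the HBaseTestingUtil/HBaseClassTestRule pattern used by HBase's own tests (the class name and the exact start-up call below are illustrative, not the actual TestLogRolling source; the real test additionally passes start options, e.g. for two data nodes):

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class LogRollingMiniClusterSketch {
        // Enforces the per-class timeout reported above ("timeout: 13 mins").
        @ClassRule
        public static final HBaseClassTestRule CLASS_RULE =
            HBaseClassTestRule.forClass(LogRollingMiniClusterSketch.class);

        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUp() throws Exception {
            // Starts ZooKeeper, a mini DFS and a mini HBase cluster under target/test-data,
            // which is what the "STARTING DFS" / Jetty / datanode lines above correspond to.
            TEST_UTIL.startMiniCluster();
        }

        @AfterClass
        public static void tearDown() throws Exception {
            TEST_UTIL.shutdownMiniCluster();
        }
    }
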
2024-12-09T03:23:05,314 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data3/current/BP-1780146639-172.17.0.3-1733714581617/current, will proceed with Du for space computation calculation, 2024-12-09T03:23:05,314 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data4/current/BP-1780146639-172.17.0.3-1733714581617/current, will proceed with Du for space computation calculation, 2024-12-09T03:23:05,314 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data2/current/BP-1780146639-172.17.0.3-1733714581617/current, will proceed with Du for space computation calculation, 2024-12-09T03:23:05,314 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data1/current/BP-1780146639-172.17.0.3-1733714581617/current, will proceed with Du for space computation calculation, 2024-12-09T03:23:05,348 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:23:05,353 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:23:05,403 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1 2024-12-09T03:23:05,405 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5925e88e9420c76d with lease ID 0xae4a62ac5701a302: Processing first storage report for DS-62da69f9-5abc-4d8f-a735-938a703c0b51 from datanode DatanodeRegistration(127.0.0.1:35417, datanodeUuid=7c6efea1-15c1-40fa-87b3-8c409a3eddb6, infoPort=41059, infoSecurePort=0, ipcPort=39381, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617) 2024-12-09T03:23:05,406 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5925e88e9420c76d with lease ID 0xae4a62ac5701a302: from storage DS-62da69f9-5abc-4d8f-a735-938a703c0b51 node DatanodeRegistration(127.0.0.1:35417, datanodeUuid=7c6efea1-15c1-40fa-87b3-8c409a3eddb6, infoPort=41059, infoSecurePort=0, ipcPort=39381, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:23:05,407 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9379e9d82a3491c2 with lease ID 0xae4a62ac5701a301: Processing first storage report for DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099 from datanode DatanodeRegistration(127.0.0.1:45115, datanodeUuid=4cab0383-56dc-4838-a75a-d5e3d05c60e6, infoPort=33613, infoSecurePort=0, ipcPort=35285, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617) 2024-12-09T03:23:05,407 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9379e9d82a3491c2 with lease ID 0xae4a62ac5701a301: from storage DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099 node DatanodeRegistration(127.0.0.1:45115, datanodeUuid=4cab0383-56dc-4838-a75a-d5e3d05c60e6, infoPort=33613, infoSecurePort=0, ipcPort=35285, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:23:05,407 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5925e88e9420c76d with lease ID 0xae4a62ac5701a302: Processing first storage report for DS-f03217ae-90f2-41bd-a429-21eac21957a8 from datanode DatanodeRegistration(127.0.0.1:35417, datanodeUuid=7c6efea1-15c1-40fa-87b3-8c409a3eddb6, infoPort=41059, infoSecurePort=0, ipcPort=39381, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617) 2024-12-09T03:23:05,408 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5925e88e9420c76d with lease ID 0xae4a62ac5701a302: from storage DS-f03217ae-90f2-41bd-a429-21eac21957a8 node DatanodeRegistration(127.0.0.1:35417, datanodeUuid=7c6efea1-15c1-40fa-87b3-8c409a3eddb6, infoPort=41059, infoSecurePort=0, ipcPort=39381, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:23:05,408 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9379e9d82a3491c2 with lease ID 0xae4a62ac5701a301: Processing first storage report for DS-a9c9302b-f106-44a0-96cc-e01de527ae30 from datanode DatanodeRegistration(127.0.0.1:45115, datanodeUuid=4cab0383-56dc-4838-a75a-d5e3d05c60e6, 
infoPort=33613, infoSecurePort=0, ipcPort=35285, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617) 2024-12-09T03:23:05,408 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9379e9d82a3491c2 with lease ID 0xae4a62ac5701a301: from storage DS-a9c9302b-f106-44a0-96cc-e01de527ae30 node DatanodeRegistration(127.0.0.1:45115, datanodeUuid=4cab0383-56dc-4838-a75a-d5e3d05c60e6, infoPort=33613, infoSecurePort=0, ipcPort=35285, storageInfo=lv=-57;cid=testClusterID;nsid=325709896;c=1733714581617), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:23:05,495 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/zookeeper_0, clientPort=64205, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:23:05,507 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64205 2024-12-09T03:23:05,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:05,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:05,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:23:05,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:23:06,210 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66 with version=8 2024-12-09T03:23:06,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:23:06,298 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T03:23:06,512 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:23:06,521 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:06,521 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:06,526 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:23:06,526 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:06,526 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:23:06,675 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:23:06,751 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T03:23:06,760 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T03:23:06,764 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:23:06,789 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 61681 (auto-detected) 2024-12-09T03:23:06,790 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-09T03:23:06,811 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43563 2024-12-09T03:23:06,835 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43563 connecting to ZooKeeper ensemble=127.0.0.1:64205 2024-12-09T03:23:06,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:435630x0, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:23:06,964 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43563-0x10008992e490000 connected 2024-12-09T03:23:07,592 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:07,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:07,606 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:23:07,610 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66, hbase.cluster.distributed=false 2024-12-09T03:23:07,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:23:07,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43563 
2024-12-09T03:23:07,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43563 2024-12-09T03:23:07,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43563 2024-12-09T03:23:07,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43563 2024-12-09T03:23:07,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43563 2024-12-09T03:23:07,730 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:23:07,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:07,731 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:07,732 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:23:07,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:23:07,732 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:23:07,734 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:23:07,736 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:23:07,737 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41341 2024-12-09T03:23:07,739 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41341 connecting to ZooKeeper ensemble=127.0.0.1:64205 2024-12-09T03:23:07,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:07,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:07,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413410x0, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:23:07,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41341-0x10008992e490001 connected 2024-12-09T03:23:07,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-09T03:23:07,819 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:23:07,829 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:23:07,831 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:23:07,836 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:23:07,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T03:23:07,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41341 2024-12-09T03:23:07,838 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41341 2024-12-09T03:23:07,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T03:23:07,839 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41341 2024-12-09T03:23:07,852 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:43563 2024-12-09T03:23:07,853 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,43563,1733714586347 2024-12-09T03:23:07,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:23:07,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:23:07,935 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:23:08,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,015 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:23:08,018 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,43563,1733714586347 from backup master directory 2024-12-09T03:23:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:23:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:23:08,056 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:23:08,056 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,061 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T03:23:08,062 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T03:23:08,118 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase.id] with ID: 17bae4ee-295e-4bc4-98ce-3dcc6c699c0d 2024-12-09T03:23:08,119 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/.tmp/hbase.id 2024-12-09T03:23:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:23:08,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:23:08,131 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/.tmp/hbase.id]:[hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase.id] 2024-12-09T03:23:08,174 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:08,179 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-09T03:23:08,197 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-09T03:23:08,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:23:08,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:23:08,292 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:23:08,293 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:23:08,298 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:23:08,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:23:08,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:23:08,340 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store 2024-12-09T03:23:08,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:23:08,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:23:08,361 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T03:23:08,364 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:08,366 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:23:08,366 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:23:08,366 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:23:08,368 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:23:08,368 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:23:08,368 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:23:08,369 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714588366Disabling compacts and flushes for region at 1733714588366Disabling writes for close at 1733714588368 (+2 ms)Writing region close event to WAL at 1733714588368Closed at 1733714588368 2024-12-09T03:23:08,371 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/.initializing 2024-12-09T03:23:08,371 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/WALs/1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,391 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C43563%2C1733714586347, suffix=, logDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/WALs/1617b0b1421f,43563,1733714586347, archiveDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/oldWALs, maxLogs=10 2024-12-09T03:23:08,399 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C43563%2C1733714586347.1733714588395 2024-12-09T03:23:08,415 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/WALs/1617b0b1421f,43563,1733714586347/1617b0b1421f%2C43563%2C1733714586347.1733714588395 2024-12-09T03:23:08,422 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:33613:33613)] 2024-12-09T03:23:08,423 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:23:08,423 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:08,426 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,427 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:23:08,484 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:08,486 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:08,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:23:08,490 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:08,491 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:23:08,491 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:23:08,494 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:08,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:23:08,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,497 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:23:08,497 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:08,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:23:08,498 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,501 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,503 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,508 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,509 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,512 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:23:08,515 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:23:08,519 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:23:08,520 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874938, jitterRate=0.1125422865152359}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:23:08,526 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714588438Initializing all the Stores at 1733714588440 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714588440Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714588441 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714588441Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714588441Cleaning up temporary data from old regions at 1733714588509 (+68 ms)Region opened successfully at 1733714588526 (+17 ms) 2024-12-09T03:23:08,527 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:23:08,561 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aa1bbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:23:08,594 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:23:08,606 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:23:08,606 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:23:08,610 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:23:08,612 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T03:23:08,618 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-09T03:23:08,618 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:23:08,656 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:23:08,666 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:23:08,687 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:23:08,690 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:23:08,692 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:23:08,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:23:08,704 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:23:08,708 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:23:08,718 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:23:08,720 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:23:08,733 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:23:08,749 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:23:08,760 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:23:08,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:23:08,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:23:08,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,778 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,43563,1733714586347, sessionid=0x10008992e490000, setting cluster-up flag (Was=false) 2024-12-09T03:23:08,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,845 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:23:08,848 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:08,901 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:23:08,904 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,43563,1733714586347 2024-12-09T03:23:08,910 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:23:08,944 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(746): ClusterId : 17bae4ee-295e-4bc4-98ce-3dcc6c699c0d 2024-12-09T03:23:08,947 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:23:08,963 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:23:08,963 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:23:08,976 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:23:08,977 DEBUG [RS:0;1617b0b1421f:41341 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772a2895, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:23:08,986 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:23:08,995 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:41341 2024-12-09T03:23:09,002 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:23:09,003 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:23:09,003 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:23:09,003 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:23:09,007 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,43563,1733714586347 with port=41341, startcode=1733714587697 2024-12-09T03:23:09,010 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
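Annotation: the StochasticLoadBalancer line above reports maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000 and runMaxSteps=false. These map onto the balancer's configuration properties; the key names below are the commonly documented ones and should be verified against this 3.0.0-beta-2 snapshot:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);      // maxSteps
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);      // stepsPerRegion
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L); // maxRunningTime (ms)
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);   // runMaxSteps
        return conf;
      }
    }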
2024-12-09T03:23:09,017 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,43563,1733714586347 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:23:09,023 DEBUG [RS:0;1617b0b1421f:41341 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:23:09,027 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:23:09,027 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:23:09,027 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:23:09,027 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:23:09,027 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:23:09,028 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,028 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:23:09,028 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,038 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:23:09,038 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:23:09,039 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714619039 2024-12-09T03:23:09,041 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:23:09,042 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:23:09,044 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,045 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', 
{TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:23:09,045 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:23:09,046 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:23:09,046 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:23:09,046 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:23:09,047 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
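Annotation: the coprocessor$1 attribute in the hbase:meta descriptor above is how a coprocessor gets recorded on a table descriptor. hbase:meta receives MultiRowMutationEndpoint automatically; for an ordinary table the same endpoint could be attached through the client API. A sketch with a hypothetical table named "demo":

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CoprocessorAttachSketch {
      public static TableDescriptor withMultiRowMutation() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the meta descriptor carries as coprocessor$1 above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }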
2024-12-09T03:23:09,051 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:23:09,052 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:23:09,052 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:23:09,057 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:23:09,057 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:23:09,059 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714589059,5,FailOnTimeoutGroup] 2024-12-09T03:23:09,060 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714589059,5,FailOnTimeoutGroup] 2024-12-09T03:23:09,060 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:23:09,060 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:23:09,062 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:23:09,062 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
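Annotation: the LogsCleaner and HFileCleaner chores initialized above run on a fixed period (600000 ms in this run) and delegate to the TTL-based cleaner plugins that were just loaded. A sketch of the usual tuning keys; the names are my reading of the standard configuration and worth double-checking for this snapshot build:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CleanerConfigSketch {
      public static Configuration cleaners() {
        Configuration conf = HBaseConfiguration.create();
        // Period of the LogsCleaner/HFileCleaner chores (the 600000 ms logged above).
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        // How long archived WALs survive before TimeToLiveLogCleaner releases them.
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // How long archived HFiles survive before TimeToLiveHFileCleaner releases them.
        conf.setLong("hbase.master.hfilecleaner.ttl", 300_000L);
        return conf;
      }
    }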
2024-12-09T03:23:09,065 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:23:09,065 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66 2024-12-09T03:23:09,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:23:09,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:23:09,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:09,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:23:09,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:23:09,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:23:09,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:23:09,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,101 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58241, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:23:09,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:23:09,105 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:23:09,105 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:23:09,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:23:09,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,109 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43563 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:23:09,111 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740 2024-12-09T03:23:09,112 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740 2024-12-09T03:23:09,112 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43563 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:23:09,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:23:09,116 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
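Annotation: the repeated CompactionConfiguration lines above print the effective values of the standard compaction knobs (128 MB min compact size, 3/10 file counts, ratios 1.2 and 5.0, 7-day major period with 0.5 jitter), and the FlushLargeStoresPolicy line notes that no per-family flush lower bound was set on the table descriptor. A sketch of the corresponding settings, assuming the usual property names plus the per-table key named verbatim in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CompactionConfigSketch {

      // Cluster-wide compaction knobs matching the values printed above.
      public static Configuration compaction() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
        return conf;
      }

      // Per-table flush lower bound, using the key named in the FlushLargeStoresPolicy line.
      public static TableDescriptor withFlushLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024)) // 16 MB per column family
            .build();
      }
    }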
2024-12-09T03:23:09,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:23:09,123 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:23:09,124 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873383, jitterRate=0.11056417226791382}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:23:09,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714589090Initializing all the Stores at 1733714589091 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589091Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589092 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714589092Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589092Cleaning up temporary data from old regions at 1733714589115 (+23 ms)Region opened successfully at 1733714589126 (+11 ms) 2024-12-09T03:23:09,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:23:09,127 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:23:09,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:23:09,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:23:09,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:23:09,128 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66 2024-12-09T03:23:09,128 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40037 
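Annotation: the "Config from master" lines above show the master pushing its own settings (hbase.rootdir, fs.defaultFS, hbase.master.info.port) to the region server during registration; outside a mini-cluster test the same keys would normally come from hbase-site.xml. A sketch that simply echoes the values logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class ConfigFromMasterSketch {
      public static Configuration sameAsLogged() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("fs.defaultFS", "hdfs://localhost:40037");
        conf.set("hbase.rootdir",
            "hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66");
        conf.setInt("hbase.master.info.port", -1); // -1 disables the master info web UI
        return conf;
      }
    }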
2024-12-09T03:23:09,129 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:23:09,129 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:23:09,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714589127Disabling compacts and flushes for region at 1733714589127Disabling writes for close at 1733714589127Writing region close event to WAL at 1733714589128 (+1 ms)Closed at 1733714589128 2024-12-09T03:23:09,132 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:23:09,132 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:23:09,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:23:09,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:23:09,144 DEBUG [RS:0;1617b0b1421f:41341 {}] zookeeper.ZKUtil(111): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,144 WARN [RS:0;1617b0b1421f:41341 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T03:23:09,145 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:23:09,145 INFO [RS:0;1617b0b1421f:41341 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:23:09,145 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,146 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,41341,1733714587697] 2024-12-09T03:23:09,147 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:23:09,168 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:23:09,178 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:23:09,182 INFO [RS:0;1617b0b1421f:41341 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:23:09,182 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,183 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:23:09,188 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:23:09,190 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
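Annotation: the MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController line (100 MB/s upper, 50 MB/s lower bound) are derived from heap-fraction and throughput settings. A sketch using what I take to be the standard keys; the 880 M figure is a fraction of this test JVM's heap rather than a directly configured number:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreAndThroughputSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap shared by all memstores.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of that limit (836 M above is ~0.95 of 880 M).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Compaction throughput bounds used by the pressure-aware controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        return conf;
      }
    }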
2024-12-09T03:23:09,190 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,190 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,190 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,190 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,191 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,192 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:23:09,192 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:23:09,192 DEBUG [RS:0;1617b0b1421f:41341 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
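Annotation: the RS_* executor services above are sized from configuration rather than hard-coded. The key names below are my assumption of the usual hbase.regionserver.executor.* properties (this test runs with pool size 1 for the region open/close executors) and should be verified against the running version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class ExecutorPoolSketch {
      public static Configuration pools() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.executor.openregion.threads", 1);  // RS_OPEN_REGION pool
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 1); // RS_CLOSE_REGION pool
        return conf;
      }
    }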
2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,193 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41341,1733714587697-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:23:09,208 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:23:09,209 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41341,1733714587697-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,210 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,210 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.Replication(171): 1617b0b1421f,41341,1733714587697 started 2024-12-09T03:23:09,225 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:09,225 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,41341,1733714587697, RpcServer on 1617b0b1421f/172.17.0.3:41341, sessionid=0x10008992e490001 2024-12-09T03:23:09,226 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:23:09,226 DEBUG [RS:0;1617b0b1421f:41341 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,226 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,41341,1733714587697' 2024-12-09T03:23:09,226 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:23:09,227 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:23:09,228 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:23:09,228 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:23:09,228 DEBUG [RS:0;1617b0b1421f:41341 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,228 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,41341,1733714587697' 2024-12-09T03:23:09,228 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:23:09,229 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:23:09,230 DEBUG [RS:0;1617b0b1421f:41341 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:23:09,230 INFO [RS:0;1617b0b1421f:41341 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:23:09,230 INFO [RS:0;1617b0b1421f:41341 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-09T03:23:09,299 WARN [1617b0b1421f:43563 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:23:09,342 INFO [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C41341%2C1733714587697, suffix=, logDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697, archiveDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs, maxLogs=32 2024-12-09T03:23:09,345 INFO [RS:0;1617b0b1421f:41341 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714589345 2024-12-09T03:23:09,355 INFO [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714589345 2024-12-09T03:23:09,356 DEBUG [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:23:09,551 DEBUG [1617b0b1421f:43563 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:23:09,562 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,569 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,41341,1733714587697, state=OPENING 2024-12-09T03:23:09,594 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:23:09,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:09,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:23:09,608 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:23:09,608 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:23:09,609 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:23:09,611 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,41341,1733714587697}] 2024-12-09T03:23:09,784 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:23:09,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55583, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:23:09,798 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:23:09,798 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:23:09,802 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C41341%2C1733714587697.meta, suffix=.meta, logDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697, archiveDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs, maxLogs=32 2024-12-09T03:23:09,804 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.meta.1733714589804.meta 2024-12-09T03:23:09,812 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.meta.1733714589804.meta 2024-12-09T03:23:09,814 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:23:09,815 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:23:09,818 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:23:09,821 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:23:09,827 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T03:23:09,833 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:23:09,834 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:09,834 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:23:09,834 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:23:09,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:23:09,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:23:09,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:23:09,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:23:09,843 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:23:09,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:23:09,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:23:09,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:23:09,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:23:09,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:09,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
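The CompactionConfiguration lines above are printed once per column family as each store of hbase:meta is opened, and the logged values (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.200000, off-peak ratio 5.000000) are the stock defaults. As a rough illustration only, and assuming these values are driven by the usual hbase-site properties rather than anything this test sets explicitly, overriding them programmatically would look roughly like the sketch below (class and method names are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      // Mirrors the values printed by CompactionConfiguration above; shown for
      // orientation only, the test itself does not necessarily set these keys.
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // compaction ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        return conf;
      }
    }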
2024-12-09T03:23:09,850 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:23:09,852 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740 2024-12-09T03:23:09,855 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740 2024-12-09T03:23:09,858 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:23:09,858 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:23:09,860 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:23:09,862 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:23:09,865 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792572, jitterRate=0.007808610796928406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:23:09,865 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:23:09,867 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714589835Writing region info on filesystem at 1733714589835Initializing all the Stores at 1733714589837 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589837Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589838 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714589838Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714589838Cleaning up temporary data from old regions at 1733714589859 (+21 ms)Running coprocessor post-open hooks at 1733714589865 (+6 ms)Region opened successfully at 1733714589867 (+2 ms) 2024-12-09T03:23:09,876 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714589776 2024-12-09T03:23:09,892 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:23:09,893 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:23:09,894 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:09,896 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,41341,1733714587697, state=OPEN 2024-12-09T03:23:10,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:23:10,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:23:10,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:23:10,000 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:23:10,000 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:10,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:23:10,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,41341,1733714587697 in 389 msec 2024-12-09T03:23:10,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:23:10,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 872 msec 2024-12-09T03:23:10,018 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:23:10,018 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:23:10,040 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:23:10,041 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,41341,1733714587697, seqNum=-1] 2024-12-09T03:23:10,062 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:23:10,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35251, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:23:10,086 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1440 sec 2024-12-09T03:23:10,087 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714590087, completionTime=-1 2024-12-09T03:23:10,089 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:23:10,090 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:23:10,117 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:23:10,118 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714650118 2024-12-09T03:23:10,118 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714710118 2024-12-09T03:23:10,118 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-09T03:23:10,121 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:10,121 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:10,122 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:10,124 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:43563, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:23:10,124 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:10,125 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:23:10,131 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:23:10,153 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.096sec 2024-12-09T03:23:10,154 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:23:10,155 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:23:10,156 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:23:10,157 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:23:10,157 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:23:10,157 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:23:10,158 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:23:10,167 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:23:10,168 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:23:10,168 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,43563,1733714586347-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:23:10,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4c629c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:23:10,258 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T03:23:10,258 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T03:23:10,262 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,43563,-1 for getting cluster id 2024-12-09T03:23:10,266 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:23:10,279 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '17bae4ee-295e-4bc4-98ce-3dcc6c699c0d' 2024-12-09T03:23:10,284 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:23:10,284 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "17bae4ee-295e-4bc4-98ce-3dcc6c699c0d" 2024-12-09T03:23:10,285 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b6d8d08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:23:10,285 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,43563,-1] 2024-12-09T03:23:10,289 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:23:10,292 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:23:10,293 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41450, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:23:10,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5286c427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:23:10,297 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:23:10,306 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,41341,1733714587697, seqNum=-1] 2024-12-09T03:23:10,306 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:23:10,309 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:23:10,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=1617b0b1421f,43563,1733714586347 2024-12-09T03:23:10,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:23:10,340 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:23:10,363 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:23:10,369 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1617b0b1421f,43563,1733714586347 2024-12-09T03:23:10,372 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@551a3624 2024-12-09T03:23:10,373 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:23:10,378 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41456, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:23:10,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T03:23:10,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
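The two TableDescriptorChecker warnings above are expected for this test: log-rolling tests deliberately shrink the region maximum file size (786432 bytes) and the memstore flush size (8192 bytes) so that flushes and rolls happen within seconds instead of after gigabytes of data. A minimal sketch of injecting those values through the test Configuration follows; the real test may instead place them on the table descriptor, which is exactly the case the checker warns about (class and method names are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class TinyRegionSettingsSketch {
      public static Configuration tinyRegionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Same values as flagged by TableDescriptorChecker above.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB per store file before splitting
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB of memstore before a flush
        return conf;
      }
    }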
2024-12-09T03:23:10,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:23:10,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T03:23:10,401 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:23:10,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-09T03:23:10,405 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:10,408 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:23:10,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:23:10,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741835_1011 (size=389) 2024-12-09T03:23:10,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741835_1011 (size=389) 2024-12-09T03:23:10,466 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9555fcc2c2233f078514c4b204fafd93, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66 2024-12-09T03:23:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741836_1012 (size=72) 2024-12-09T03:23:10,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741836_1012 (size=72) 2024-12-09T03:23:10,482 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:10,482 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9555fcc2c2233f078514c4b204fafd93, disabling compactions & flushes 2024-12-09T03:23:10,482 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,483 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,483 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. after waiting 0 ms 2024-12-09T03:23:10,483 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,483 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,483 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9555fcc2c2233f078514c4b204fafd93: Waiting for close lock at 1733714590482Disabling compacts and flushes for region at 1733714590482Disabling writes for close at 1733714590483 (+1 ms)Writing region close event to WAL at 1733714590483Closed at 1733714590483 2024-12-09T03:23:10,485 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:23:10,490 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733714590485"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714590485"}]},"ts":"1733714590485"} 2024-12-09T03:23:10,499 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
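The create request logged at 03:23:10,389 and the CreateTableProcedure steps that follow correspond to a single client-side Admin call. A hedged sketch of the equivalent code, using only the table name and the 'info' family (VERSIONS => '1') visible in the log and assuming a reachable cluster in the local configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          // Single 'info' family keeping one version, matching the logged descriptor.
          admin.createTable(TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build())
              .build());
        }
      }
    }

The master then drives the ASSIGN and OpenRegionProcedure chain seen below before marking the table ENABLED in hbase:meta.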
2024-12-09T03:23:10,502 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:23:10,505 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714590502"}]},"ts":"1733714590502"} 2024-12-09T03:23:10,511 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T03:23:10,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9555fcc2c2233f078514c4b204fafd93, ASSIGN}] 2024-12-09T03:23:10,516 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9555fcc2c2233f078514c4b204fafd93, ASSIGN 2024-12-09T03:23:10,519 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9555fcc2c2233f078514c4b204fafd93, ASSIGN; state=OFFLINE, location=1617b0b1421f,41341,1733714587697; forceNewPlan=false, retain=false 2024-12-09T03:23:10,670 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9555fcc2c2233f078514c4b204fafd93, regionState=OPENING, regionLocation=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:10,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9555fcc2c2233f078514c4b204fafd93, ASSIGN because future has completed 2024-12-09T03:23:10,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9555fcc2c2233f078514c4b204fafd93, server=1617b0b1421f,41341,1733714587697}] 2024-12-09T03:23:10,836 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 
2024-12-09T03:23:10,836 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9555fcc2c2233f078514c4b204fafd93, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:23:10,837 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,837 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:23:10,837 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,838 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,840 INFO [StoreOpener-9555fcc2c2233f078514c4b204fafd93-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,843 INFO [StoreOpener-9555fcc2c2233f078514c4b204fafd93-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9555fcc2c2233f078514c4b204fafd93 columnFamilyName info 2024-12-09T03:23:10,843 DEBUG [StoreOpener-9555fcc2c2233f078514c4b204fafd93-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:23:10,845 INFO [StoreOpener-9555fcc2c2233f078514c4b204fafd93-1 {}] regionserver.HStore(327): Store=9555fcc2c2233f078514c4b204fafd93/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:23:10,845 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,846 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,847 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,848 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,848 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,850 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,853 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:23:10,854 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9555fcc2c2233f078514c4b204fafd93; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799996, jitterRate=0.017247900366783142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:23:10,854 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:10,855 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9555fcc2c2233f078514c4b204fafd93: Running coprocessor pre-open hook at 1733714590838Writing region info on filesystem at 1733714590838Initializing all the Stores at 1733714590840 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714590840Cleaning up temporary data from old regions at 1733714590848 (+8 ms)Running coprocessor post-open hooks at 1733714590854 (+6 ms)Region opened successfully at 1733714590855 (+1 ms) 2024-12-09T03:23:10,857 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93., pid=6, masterSystemTime=1733714590830 2024-12-09T03:23:10,861 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,861 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:23:10,863 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9555fcc2c2233f078514c4b204fafd93, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,41341,1733714587697 2024-12-09T03:23:10,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9555fcc2c2233f078514c4b204fafd93, server=1617b0b1421f,41341,1733714587697 because future has completed 2024-12-09T03:23:10,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:23:10,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9555fcc2c2233f078514c4b204fafd93, server=1617b0b1421f,41341,1733714587697 in 194 msec 2024-12-09T03:23:10,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:23:10,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9555fcc2c2233f078514c4b204fafd93, ASSIGN in 360 msec 2024-12-09T03:23:10,878 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:23:10,879 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714590878"}]},"ts":"1733714590878"} 2024-12-09T03:23:10,882 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T03:23:10,884 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:23:10,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 492 msec 2024-12-09T03:23:15,342 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T03:23:15,385 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:23:15,386 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T03:23:16,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:23:16,747 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T03:23:16,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T03:23:16,751 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T03:23:16,753 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:23:16,753 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T03:23:16,754 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T03:23:16,754 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T03:23:20,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:23:20,436 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-09T03:23:20,442 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-09T03:23:20,448 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T03:23:20,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 
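The ClientMetaTableAccessor scan and the "Found 1 regions for table" line above are the test harness resolving the new table's single region. Under the same assumptions as the create-table sketch earlier (an already-open Connection passed in as 'conn'), a client can do the same through a RegionLocator:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionLocationSketch {
      // Placeholder helper; 'conn' is assumed to be an already-open Connection.
      static List<HRegionLocation> locate(Connection conn) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(
            TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          return locator.getAllRegionLocations(); // one location expected, per the log
        }
      }
    }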
2024-12-09T03:23:20,449 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714600449 2024-12-09T03:23:20,457 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:20,457 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:20,458 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:20,458 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:20,458 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:20,458 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714589345 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714600449 2024-12-09T03:23:20,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:23:20,460 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714589345 is not closed yet, will try archiving it next time 2024-12-09T03:23:20,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741833_1009 (size=451) 2024-12-09T03:23:20,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741833_1009 (size=451) 2024-12-09T03:23:20,463 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714589345 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714589345 2024-12-09T03:23:20,468 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93., hostname=1617b0b1421f,41341,1733714587697, seqNum=2] 2024-12-09T03:23:32,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] regionserver.HRegion(8855): Flush requested on 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:32,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9555fcc2c2233f078514c4b204fafd93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:23:32,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e0f4886dbc734ad2954b3591e715a5e8 is 1080, key is row0001/info:/1733714600471/Put/seqid=0 2024-12-09T03:23:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741838_1014 (size=12509) 2024-12-09T03:23:32,570 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741838_1014 (size=12509) 2024-12-09T03:23:32,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e0f4886dbc734ad2954b3591e715a5e8 2024-12-09T03:23:32,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e0f4886dbc734ad2954b3591e715a5e8 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8 2024-12-09T03:23:32,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T03:23:32,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 121ms, sequenceid=11, compaction requested=false 2024-12-09T03:23:32,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9555fcc2c2233f078514c4b204fafd93: 2024-12-09T03:23:35,399 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T03:23:40,522 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714620521 2024-12-09T03:23:40,730 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:40,730 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:40,731 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:40,731 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:40,731 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:40,731 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:40,731 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714600449 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714620521 2024-12-09T03:23:40,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:23:40,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714600449 is not closed yet, will try archiving it next time 2024-12-09T03:23:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741837_1013 (size=12399) 2024-12-09T03:23:40,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741837_1013 (size=12399) 2024-12-09T03:23:40,937 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:43,142 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:45,348 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:47,555 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:47,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] regionserver.HRegion(8855): Flush requested on 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:23:47,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9555fcc2c2233f078514c4b204fafd93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:23:47,759 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:47,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/4bf8caa63ef64ce899dfbdad730bf59e is 1080, key is row0008/info:/1733714614507/Put/seqid=0 2024-12-09T03:23:47,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741840_1016 (size=12509) 2024-12-09T03:23:47,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741840_1016 (size=12509) 2024-12-09T03:23:47,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/4bf8caa63ef64ce899dfbdad730bf59e 2024-12-09T03:23:47,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/4bf8caa63ef64ce899dfbdad730bf59e as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e 2024-12-09T03:23:47,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T03:23:47,999 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:47,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 
442ms, sequenceid=21, compaction requested=false 2024-12-09T03:23:48,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9555fcc2c2233f078514c4b204fafd93: 2024-12-09T03:23:48,000 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-09T03:23:48,001 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:23:48,002 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8 because midkey is the same as first or last row 2024-12-09T03:23:49,763 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:50,610 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T03:23:50,610 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T03:23:51,970 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:51,972 WARN [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:51,973 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C41341%2C1733714587697:(num 1733714620521) roll requested 2024-12-09T03:23:51,974 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714631974 2024-12-09T03:23:52,186 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:23:52,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:52,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:52,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:52,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:23:52,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-12-09T03:23:52,188 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714620521 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714631974 2024-12-09T03:23:52,189 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:33613:33613)] 2024-12-09T03:23:52,190 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714620521 is not closed yet, will try archiving it next time 2024-12-09T03:23:52,190 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714600449 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714600449 2024-12-09T03:23:52,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741839_1015 (size=7739) 2024-12-09T03:23:52,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741839_1015 (size=7739) 2024-12-09T03:23:54,179 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:23:55,838 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9555fcc2c2233f078514c4b204fafd93, had cached 0 bytes from a total of 25018 2024-12-09T03:23:56,386 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:23:58,593 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:00,803 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], 
DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:02,807 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:24:02,808 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714642808 2024-12-09T03:24:05,400 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:24:07,864 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5051 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:07,866 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5051 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:07,866 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C41341%2C1733714587697:(num 1733714642808) roll requested 2024-12-09T03:24:07,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:07,867 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:07,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:07,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:07,867 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:07,868 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714631974 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714642808 2024-12-09T03:24:07,869 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:33613:33613)] 2024-12-09T03:24:07,869 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714631974 is not closed yet, will try archiving it next time 2024-12-09T03:24:07,870 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714647869 2024-12-09T03:24:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741841_1017 (size=4753) 2024-12-09T03:24:07,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741841_1017 (size=4753) 2024-12-09T03:24:12,875 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:12,875 WARN [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:12,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] regionserver.HRegion(8855): Flush requested on 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:24:12,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9555fcc2c2233f078514c4b204fafd93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:24:12,922 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5045 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:12,922 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5045 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:14,878 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:24:17,941 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5061 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:17,941 WARN [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5061 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK], DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK]] 2024-12-09T03:24:17,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:17,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:17,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:17,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:17,944 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:17,945 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714642808 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714647869 2024-12-09T03:24:17,947 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:24:17,948 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714642808 is not closed yet, will try archiving it next time 2024-12-09T03:24:17,948 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C41341%2C1733714587697:(num 1733714647869) roll requested 2024-12-09T03:24:17,949 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714657948 2024-12-09T03:24:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741842_1018 (size=1569) 2024-12-09T03:24:17,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741842_1018 (size=1569) 2024-12-09T03:24:17,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/7beb993af3884009b7087a65d38b12cb is 1080, key is row0015/info:/1733714629559/Put/seqid=0 2024-12-09T03:24:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741844_1020 (size=12509) 2024-12-09T03:24:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741844_1020 (size=12509) 2024-12-09T03:24:17,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/7beb993af3884009b7087a65d38b12cb 2024-12-09T03:24:17,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/7beb993af3884009b7087a65d38b12cb as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb 2024-12-09T03:24:17,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T03:24:23,014 INFO [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] 
wal.AbstractFSWAL(1368): Slow sync cost: 5036 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:24:23,014 WARN [FSHLog-0-hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66-prefix:1617b0b1421f,41341,1733714587697 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5036 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:24:23,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 10139ms, sequenceid=31, compaction requested=true 2024-12-09T03:24:23,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9555fcc2c2233f078514c4b204fafd93: 2024-12-09T03:24:23,016 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-09T03:24:23,016 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:24:23,017 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8 because midkey is the same as first or last row 2024-12-09T03:24:23,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9555fcc2c2233f078514c4b204fafd93:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:24:23,021 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5070 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:24:23,021 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5070 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45115,DS-0ea3deda-ae3e-4aaf-84c7-3b461b0d6099,DISK], DatanodeInfoWithStorage[127.0.0.1:35417,DS-62da69f9-5abc-4d8f-a735-938a703c0b51,DISK]] 2024-12-09T03:24:23,022 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,022 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,022 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,022 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:24:23,022 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:24:23,022 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,023 INFO 
[regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714647869 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714657948 2024-12-09T03:24:23,024 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:24:23,024 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714647869 is not closed yet, will try archiving it next time 2024-12-09T03:24:23,024 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714620521 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714620521 2024-12-09T03:24:23,024 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C41341%2C1733714587697:(num 1733714657948) roll requested 2024-12-09T03:24:23,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741843_1019 (size=438) 2024-12-09T03:24:23,025 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714663025 2024-12-09T03:24:23,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741843_1019 (size=438) 2024-12-09T03:24:23,026 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714631974 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714631974 2024-12-09T03:24:23,027 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:24:23,029 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714642808 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714642808 2024-12-09T03:24:23,029 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HStore(1541): 9555fcc2c2233f078514c4b204fafd93/info is initiating minor compaction (all files) 2024-12-09T03:24:23,029 INFO [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9555fcc2c2233f078514c4b204fafd93/info in TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 
2024-12-09T03:24:23,030 INFO [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb] into tmpdir=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp, totalSize=36.6 K 2024-12-09T03:24:23,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714647869 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714647869 2024-12-09T03:24:23,031 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] compactions.Compactor(225): Compacting e0f4886dbc734ad2954b3591e715a5e8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733714600471 2024-12-09T03:24:23,032 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4bf8caa63ef64ce899dfbdad730bf59e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733714614507 2024-12-09T03:24:23,033 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7beb993af3884009b7087a65d38b12cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733714629559 2024-12-09T03:24:23,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,034 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714657948 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714663025 2024-12-09T03:24:23,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741845_1021 (size=93) 2024-12-09T03:24:23,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741845_1021 (size=93) 2024-12-09T03:24:23,037 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714657948 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs/1617b0b1421f%2C41341%2C1733714587697.1733714657948 2024-12-09T03:24:23,049 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:41059:41059)] 2024-12-09T03:24:23,049 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41341%2C1733714587697.1733714663049 2024-12-09T03:24:23,060 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,060 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,060 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,060 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,060 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:23,061 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714663025 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/WALs/1617b0b1421f,41341,1733714587697/1617b0b1421f%2C41341%2C1733714587697.1733714663049 2024-12-09T03:24:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741846_1022 (size=1258) 2024-12-09T03:24:23,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741846_1022 (size=1258) 2024-12-09T03:24:23,070 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41059:41059),(127.0.0.1/127.0.0.1:33613:33613)] 2024-12-09T03:24:23,073 INFO [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9555fcc2c2233f078514c4b204fafd93#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:24:23,074 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/8e40d13829e74b15ad1941ebc485ec81 is 1080, key is row0001/info:/1733714600471/Put/seqid=0 2024-12-09T03:24:23,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741848_1024 (size=27710) 2024-12-09T03:24:23,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741848_1024 (size=27710) 2024-12-09T03:24:23,097 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/8e40d13829e74b15ad1941ebc485ec81 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/8e40d13829e74b15ad1941ebc485ec81 2024-12-09T03:24:23,114 INFO [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9555fcc2c2233f078514c4b204fafd93/info of 9555fcc2c2233f078514c4b204fafd93 into 8e40d13829e74b15ad1941ebc485ec81(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:24:23,114 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9555fcc2c2233f078514c4b204fafd93: 2024-12-09T03:24:23,116 INFO [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93., storeName=9555fcc2c2233f078514c4b204fafd93/info, priority=13, startTime=1733714663019; duration=0sec 2024-12-09T03:24:23,116 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T03:24:23,116 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:24:23,116 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/8e40d13829e74b15ad1941ebc485ec81 because midkey is the same as first or last row 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/8e40d13829e74b15ad1941ebc485ec81 because midkey is the same as first or last row 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/8e40d13829e74b15ad1941ebc485ec81 because midkey is the same as first or last row 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:24:23,117 DEBUG [RS:0;1617b0b1421f:41341-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9555fcc2c2233f078514c4b204fafd93:info 2024-12-09T03:24:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41341 {}] regionserver.HRegion(8855): Flush requested on 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:24:35,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9555fcc2c2233f078514c4b204fafd93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:24:35,099 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e009cf577e714bf4bd7d08cbb9440e91 is 1080, key is row0022/info:/1733714663051/Put/seqid=0 2024-12-09T03:24:35,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741849_1025 (size=12509) 2024-12-09T03:24:35,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741849_1025 (size=12509) 2024-12-09T03:24:35,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e009cf577e714bf4bd7d08cbb9440e91 2024-12-09T03:24:35,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/e009cf577e714bf4bd7d08cbb9440e91 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e009cf577e714bf4bd7d08cbb9440e91 2024-12-09T03:24:35,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e009cf577e714bf4bd7d08cbb9440e91, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T03:24:35,124 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 32ms, sequenceid=42, compaction requested=false 2024-12-09T03:24:35,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9555fcc2c2233f078514c4b204fafd93: 2024-12-09T03:24:35,125 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-09T03:24:35,125 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:24:35,125 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/8e40d13829e74b15ad1941ebc485ec81 because midkey is the same as first or last row 2024-12-09T03:24:35,400 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:24:40,839 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9555fcc2c2233f078514c4b204fafd93, had cached 0 bytes from a total of 40219 2024-12-09T03:24:43,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:24:43,114 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:24:43,115 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:43,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:43,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:43,124 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T03:24:43,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:24:43,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1349950594, stopped=false 2024-12-09T03:24:43,124 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,43563,1733714586347 2024-12-09T03:24:43,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:43,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:43,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:43,185 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:24:43,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:43,186 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:24:43,186 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:43,186 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:43,186 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:43,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:43,187 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,41341,1733714587697' ***** 2024-12-09T03:24:43,187 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:24:43,188 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:24:43,188 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:24:43,188 INFO [RS:0;1617b0b1421f:41341 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:24:43,189 INFO [RS:0;1617b0b1421f:41341 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:24:43,189 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(3091): Received CLOSE for 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:24:43,190 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,41341,1733714587697 2024-12-09T03:24:43,190 INFO [RS:0;1617b0b1421f:41341 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:24:43,190 INFO [RS:0;1617b0b1421f:41341 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:41341. 
2024-12-09T03:24:43,191 DEBUG [RS:0;1617b0b1421f:41341 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:43,191 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9555fcc2c2233f078514c4b204fafd93, disabling compactions & flushes 2024-12-09T03:24:43,191 DEBUG [RS:0;1617b0b1421f:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:43,191 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:24:43,191 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:24:43,191 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. after waiting 0 ms 2024-12-09T03:24:43,191 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:24:43,191 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:24:43,191 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:24:43,191 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
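The AsyncConnectionImpl being closed in the stack above is the region server's own cluster connection; a client closing its async connection goes through the same close() path. A minimal client-side sketch, assuming default configuration (nothing here is taken from the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection returns a CompletableFuture; get() waits for it, and
        // try-with-resources closes the connection just like the close() traced above.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // ... issue async reads/writes here ...
        }
      }
    }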
2024-12-09T03:24:43,192 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9555fcc2c2233f078514c4b204fafd93 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T03:24:43,192 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:24:43,192 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T03:24:43,193 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:24:43,193 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1325): Online Regions={9555fcc2c2233f078514c4b204fafd93=TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:24:43,193 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:24:43,193 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:24:43,193 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:24:43,193 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:24:43,193 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:24:43,193 DEBUG [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9555fcc2c2233f078514c4b204fafd93 2024-12-09T03:24:43,193 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-09T03:24:43,193 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:24:43,199 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/743fbbcb7fb540b9940a92dac7b18f53 is 1080, key is row0029/info:/1733714677097/Put/seqid=0 2024-12-09T03:24:43,199 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:24:43,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741850_1026 (size=8193) 2024-12-09T03:24:43,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741850_1026 (size=8193) 2024-12-09T03:24:43,206 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/743fbbcb7fb540b9940a92dac7b18f53 2024-12-09T03:24:43,214 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/info/f3f0632f7fff4d5eb3b3ad991b6d51a8 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93./info:regioninfo/1733714590863/Put/seqid=0 2024-12-09T03:24:43,215 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/.tmp/info/743fbbcb7fb540b9940a92dac7b18f53 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/743fbbcb7fb540b9940a92dac7b18f53 2024-12-09T03:24:43,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741851_1027 (size=7016) 2024-12-09T03:24:43,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741851_1027 (size=7016) 2024-12-09T03:24:43,224 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/info/f3f0632f7fff4d5eb3b3ad991b6d51a8 2024-12-09T03:24:43,225 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/743fbbcb7fb540b9940a92dac7b18f53, entries=3, sequenceid=48, filesize=8.0 K 2024-12-09T03:24:43,227 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 35ms, sequenceid=48, compaction requested=true 2024-12-09T03:24:43,227 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb] to archive 2024-12-09T03:24:43,230 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:24:43,233 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8 to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/e0f4886dbc734ad2954b3591e715a5e8 2024-12-09T03:24:43,235 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/4bf8caa63ef64ce899dfbdad730bf59e 2024-12-09T03:24:43,237 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/info/7beb993af3884009b7087a65d38b12cb 2024-12-09T03:24:43,248 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/ns/467fd604ead741b2908cc4ccf1387890 is 43, key is default/ns:d/1733714590069/Put/seqid=0 2024-12-09T03:24:43,247 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1617b0b1421f:43563 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T03:24:43,252 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e0f4886dbc734ad2954b3591e715a5e8=12509, 4bf8caa63ef64ce899dfbdad730bf59e=12509, 7beb993af3884009b7087a65d38b12cb=12509] 2024-12-09T03:24:43,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741852_1028 (size=5153) 2024-12-09T03:24:43,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741852_1028 (size=5153) 2024-12-09T03:24:43,254 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/ns/467fd604ead741b2908cc4ccf1387890 2024-12-09T03:24:43,257 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/default/TestLogRolling-testSlowSyncLogRolling/9555fcc2c2233f078514c4b204fafd93/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-09T03:24:43,259 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 2024-12-09T03:24:43,259 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9555fcc2c2233f078514c4b204fafd93: Waiting for close lock at 1733714683190Running coprocessor pre-close hooks at 1733714683191 (+1 ms)Disabling compacts and flushes for region at 1733714683191Disabling writes for close at 1733714683191Obtaining lock to block concurrent updates at 1733714683192 (+1 ms)Preparing flush snapshotting stores in 9555fcc2c2233f078514c4b204fafd93 at 1733714683192Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733714683192Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. at 1733714683193 (+1 ms)Flushing 9555fcc2c2233f078514c4b204fafd93/info: creating writer at 1733714683194 (+1 ms)Flushing 9555fcc2c2233f078514c4b204fafd93/info: appending metadata at 1733714683198 (+4 ms)Flushing 9555fcc2c2233f078514c4b204fafd93/info: closing flushed file at 1733714683198Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42e9672c: reopening flushed file at 1733714683214 (+16 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9555fcc2c2233f078514c4b204fafd93 in 35ms, sequenceid=48, compaction requested=true at 1733714683227 (+13 ms)Writing region close event to WAL at 1733714683252 (+25 ms)Running coprocessor post-close hooks at 1733714683257 (+5 ms)Closed at 1733714683259 (+2 ms) 2024-12-09T03:24:43,260 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733714590380.9555fcc2c2233f078514c4b204fafd93. 
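The cells flushed and archived above (keys of the form row00NN/info:/timestamp/Put against TestLogRolling-testSlowSyncLogRolling) were produced by ordinary client writes. A minimal sketch of such a write, assuming default client configuration; the qualifier and value are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutRowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          // One cell in the 'info' family, matching the row/info:... keys seen in the flush above.
          Put put = new Put(Bytes.toBytes("row0029"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          table.put(put);
        }
      }
    }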
2024-12-09T03:24:43,276 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/table/18c805b1aa31452b956a843b17e85cb5 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733714590878/Put/seqid=0 2024-12-09T03:24:43,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741853_1029 (size=5396) 2024-12-09T03:24:43,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741853_1029 (size=5396) 2024-12-09T03:24:43,283 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/table/18c805b1aa31452b956a843b17e85cb5 2024-12-09T03:24:43,292 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/info/f3f0632f7fff4d5eb3b3ad991b6d51a8 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/info/f3f0632f7fff4d5eb3b3ad991b6d51a8 2024-12-09T03:24:43,301 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/info/f3f0632f7fff4d5eb3b3ad991b6d51a8, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T03:24:43,303 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/ns/467fd604ead741b2908cc4ccf1387890 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/ns/467fd604ead741b2908cc4ccf1387890 2024-12-09T03:24:43,310 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/ns/467fd604ead741b2908cc4ccf1387890, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T03:24:43,312 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/.tmp/table/18c805b1aa31452b956a843b17e85cb5 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/table/18c805b1aa31452b956a843b17e85cb5 2024-12-09T03:24:43,319 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/table/18c805b1aa31452b956a843b17e85cb5, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T03:24:43,320 INFO 
[RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false 2024-12-09T03:24:43,326 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:24:43,326 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:24:43,326 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:43,327 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714683192Running coprocessor pre-close hooks at 1733714683192Disabling compacts and flushes for region at 1733714683192Disabling writes for close at 1733714683193 (+1 ms)Obtaining lock to block concurrent updates at 1733714683193Preparing flush snapshotting stores in 1588230740 at 1733714683193Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733714683194 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733714683195 (+1 ms)Flushing 1588230740/info: creating writer at 1733714683196 (+1 ms)Flushing 1588230740/info: appending metadata at 1733714683214 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733714683214Flushing 1588230740/ns: creating writer at 1733714683233 (+19 ms)Flushing 1588230740/ns: appending metadata at 1733714683247 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733714683247Flushing 1588230740/table: creating writer at 1733714683262 (+15 ms)Flushing 1588230740/table: appending metadata at 1733714683275 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733714683275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@495056c0: reopening flushed file at 1733714683291 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f03714c: reopening flushed file at 1733714683302 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67a3f823: reopening flushed file at 1733714683310 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false at 1733714683320 (+10 ms)Writing region close event to WAL at 1733714683321 (+1 ms)Running coprocessor post-close hooks at 1733714683326 (+5 ms)Closed at 1733714683326 2024-12-09T03:24:43,327 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:43,394 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,41341,1733714587697; all regions closed. 
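At this point the two regions listed earlier as Online Regions (the test table region and hbase:meta) have both been closed. A region's encoded name and its hosting server, as printed throughout this log, can be enumerated from a client with RegionLocator; a minimal sketch, assuming default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          // Each location pairs a region (encoded names like 9555fcc2c2233f078514c4b204fafd93)
          // with the server currently hosting it (e.g. 1617b0b1421f,41341,1733714587697).
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }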
2024-12-09T03:24:43,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,397 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,397 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,398 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,398 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741834_1010 (size=3066) 2024-12-09T03:24:43,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741834_1010 (size=3066) 2024-12-09T03:24:43,408 DEBUG [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs 2024-12-09T03:24:43,408 INFO [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C41341%2C1733714587697.meta:.meta(num 1733714589804) 2024-12-09T03:24:43,409 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,409 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,409 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,409 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,409 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:43,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741847_1023 (size=12695) 2024-12-09T03:24:43,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741847_1023 (size=12695) 2024-12-09T03:24:43,415 DEBUG [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/oldWALs 2024-12-09T03:24:43,415 INFO [RS:0;1617b0b1421f:41341 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C41341%2C1733714587697:(num 1733714663049) 2024-12-09T03:24:43,415 DEBUG [RS:0;1617b0b1421f:41341 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:43,415 INFO [RS:0;1617b0b1421f:41341 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:24:43,415 INFO [RS:0;1617b0b1421f:41341 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:24:43,415 INFO [RS:0;1617b0b1421f:41341 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:24:43,415 INFO [RS:0;1617b0b1421f:41341 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:24:43,415 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
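The lines above close the server's two WALs and move them to the oldWALs directory as part of shutdown. Outside of shutdown, a WAL roll can also be requested explicitly through the Admin API; a minimal sketch, reusing the server name exactly as it is printed in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the region server to roll its write-ahead log; rolled files are later
          // archived to oldWALs, as in the AbstractFSWAL lines above.
          admin.rollWALWriter(ServerName.valueOf("1617b0b1421f,41341,1733714587697"));
        }
      }
    }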
2024-12-09T03:24:43,416 INFO [RS:0;1617b0b1421f:41341 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41341 2024-12-09T03:24:43,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:24:43,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,41341,1733714587697 2024-12-09T03:24:43,427 INFO [RS:0;1617b0b1421f:41341 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:24:43,438 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,41341,1733714587697] 2024-12-09T03:24:43,448 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,41341,1733714587697 already deleted, retry=false 2024-12-09T03:24:43,448 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,41341,1733714587697 expired; onlineServers=0 2024-12-09T03:24:43,448 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,43563,1733714586347' ***** 2024-12-09T03:24:43,448 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:24:43,449 INFO [M:0;1617b0b1421f:43563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:24:43,449 INFO [M:0;1617b0b1421f:43563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:24:43,449 DEBUG [M:0;1617b0b1421f:43563 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:24:43,449 DEBUG [M:0;1617b0b1421f:43563 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:24:43,449 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
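The ZKWatcher lines in this log follow the standard ZooKeeper watch pattern: a one-shot watch is set on a znode (for example /hbase/running or /hbase/rs/<server>) and a NodeDeleted or NodeChildrenChanged event fires when the master removes it. A minimal sketch with the plain ZooKeeper client, using the quorum address from this log; session handling and error handling are omitted:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Session-level watcher: only logs connection-state changes.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64205", 30000,
            event -> System.out.println("session event: " + event.getState()));

        // Like ZKUtil above, exists() can set a watch even if the znode is absent;
        // the watcher fires once on the next create/delete of that path.
        zk.exists("/hbase/running", new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            System.out.println("event " + event.getType() + " on " + event.getPath());
          }
        });

        Thread.sleep(60_000); // keep the session open long enough to observe an event
        zk.close();
      }
    }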
2024-12-09T03:24:43,449 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714589059 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714589059,5,FailOnTimeoutGroup] 2024-12-09T03:24:43,449 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714589059 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714589059,5,FailOnTimeoutGroup] 2024-12-09T03:24:43,450 INFO [M:0;1617b0b1421f:43563 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:24:43,450 INFO [M:0;1617b0b1421f:43563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:24:43,451 DEBUG [M:0;1617b0b1421f:43563 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:24:43,451 INFO [M:0;1617b0b1421f:43563 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:24:43,451 INFO [M:0;1617b0b1421f:43563 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:24:43,452 INFO [M:0;1617b0b1421f:43563 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:24:43,452 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:24:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:24:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:43,458 DEBUG [M:0;1617b0b1421f:43563 {}] zookeeper.ZKUtil(347): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:24:43,458 WARN [M:0;1617b0b1421f:43563 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:24:43,459 INFO [M:0;1617b0b1421f:43563 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/.lastflushedseqids 2024-12-09T03:24:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741854_1030 (size=130) 2024-12-09T03:24:43,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741854_1030 (size=130) 2024-12-09T03:24:43,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:43,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41341-0x10008992e490001, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:43,538 INFO [RS:0;1617b0b1421f:41341 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:24:43,539 INFO 
[RS:0;1617b0b1421f:41341 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,41341,1733714587697; zookeeper connection closed. 2024-12-09T03:24:43,539 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ddf13f3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ddf13f3 2024-12-09T03:24:43,540 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:24:43,872 INFO [M:0;1617b0b1421f:43563 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:24:43,873 INFO [M:0;1617b0b1421f:43563 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:24:43,873 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:24:43,873 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:43,873 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:43,873 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:24:43,873 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:43,874 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-09T03:24:43,897 DEBUG [M:0;1617b0b1421f:43563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c816f768a9a243839e26ad82f7b38521 is 82, key is hbase:meta,,1/info:regioninfo/1733714589893/Put/seqid=0 2024-12-09T03:24:43,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741855_1031 (size=5672) 2024-12-09T03:24:43,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741855_1031 (size=5672) 2024-12-09T03:24:43,904 INFO [M:0;1617b0b1421f:43563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c816f768a9a243839e26ad82f7b38521 2024-12-09T03:24:43,925 DEBUG [M:0;1617b0b1421f:43563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c920608542f94f658c823d88928fb3e6 is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733714590886/Put/seqid=0 2024-12-09T03:24:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741856_1032 (size=6246) 2024-12-09T03:24:43,931 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741856_1032 (size=6246) 2024-12-09T03:24:44,335 INFO [M:0;1617b0b1421f:43563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c920608542f94f658c823d88928fb3e6 2024-12-09T03:24:44,348 INFO [M:0;1617b0b1421f:43563 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c920608542f94f658c823d88928fb3e6 2024-12-09T03:24:44,366 DEBUG [M:0;1617b0b1421f:43563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e1bd61ac13b457f9fcbbf391f3901a0 is 69, key is 1617b0b1421f,41341,1733714587697/rs:state/1733714589115/Put/seqid=0 2024-12-09T03:24:44,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741857_1033 (size=5156) 2024-12-09T03:24:44,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741857_1033 (size=5156) 2024-12-09T03:24:44,374 INFO [M:0;1617b0b1421f:43563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e1bd61ac13b457f9fcbbf391f3901a0 2024-12-09T03:24:44,398 DEBUG [M:0;1617b0b1421f:43563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ee296d38c1e48de981498b2a0ad4c92 is 52, key is load_balancer_on/state:d/1733714590337/Put/seqid=0 2024-12-09T03:24:44,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741858_1034 (size=5056) 2024-12-09T03:24:44,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741858_1034 (size=5056) 2024-12-09T03:24:44,405 INFO [M:0;1617b0b1421f:43563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ee296d38c1e48de981498b2a0ad4c92 2024-12-09T03:24:44,414 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c816f768a9a243839e26ad82f7b38521 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c816f768a9a243839e26ad82f7b38521 2024-12-09T03:24:44,422 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c816f768a9a243839e26ad82f7b38521, entries=8, sequenceid=59, filesize=5.5 K 2024-12-09T03:24:44,423 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c920608542f94f658c823d88928fb3e6 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c920608542f94f658c823d88928fb3e6 2024-12-09T03:24:44,431 INFO [M:0;1617b0b1421f:43563 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c920608542f94f658c823d88928fb3e6 2024-12-09T03:24:44,431 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c920608542f94f658c823d88928fb3e6, entries=6, sequenceid=59, filesize=6.1 K 2024-12-09T03:24:44,433 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7e1bd61ac13b457f9fcbbf391f3901a0 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e1bd61ac13b457f9fcbbf391f3901a0 2024-12-09T03:24:44,441 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7e1bd61ac13b457f9fcbbf391f3901a0, entries=1, sequenceid=59, filesize=5.0 K 2024-12-09T03:24:44,442 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ee296d38c1e48de981498b2a0ad4c92 as hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4ee296d38c1e48de981498b2a0ad4c92 2024-12-09T03:24:44,448 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4ee296d38c1e48de981498b2a0ad4c92, entries=1, sequenceid=59, filesize=4.9 K 2024-12-09T03:24:44,450 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 577ms, sequenceid=59, compaction requested=false 2024-12-09T03:24:44,451 INFO [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:24:44,452 DEBUG [M:0;1617b0b1421f:43563 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714683873Disabling compacts and flushes for region at 1733714683873Disabling writes for close at 1733714683873Obtaining lock to block concurrent updates at 1733714683874 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714683874Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733714683875 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733714683877 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714683877Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714683896 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714683896Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714683910 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714683925 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714683925Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714684349 (+424 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714684366 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714684366Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714684382 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714684398 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714684398Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b4974d9: reopening flushed file at 1733714684412 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b9966c9: reopening flushed file at 1733714684422 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d88808: reopening flushed file at 1733714684431 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f8bf183: reopening flushed file at 1733714684441 (+10 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 577ms, sequenceid=59, compaction requested=false at 1733714684450 (+9 ms)Writing region close event to WAL at 1733714684451 (+1 ms)Closed at 1733714684451 2024-12-09T03:24:44,452 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:44,452 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:44,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:44,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:44,453 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:44,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45115 is added to blk_1073741830_1006 (size=27961) 2024-12-09T03:24:44,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35417 is added to blk_1073741830_1006 (size=27961) 2024-12-09T03:24:44,456 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:24:44,456 INFO [M:0;1617b0b1421f:43563 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T03:24:44,457 INFO [M:0;1617b0b1421f:43563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43563 2024-12-09T03:24:44,457 INFO [M:0;1617b0b1421f:43563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:24:44,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:44,601 INFO [M:0;1617b0b1421f:43563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:24:44,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43563-0x10008992e490000, quorum=127.0.0.1:64205, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:44,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c963ecd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:44,640 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:44,641 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:44,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:44,641 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:44,645 WARN [BP-1780146639-172.17.0.3-1733714581617 heartbeating to localhost/127.0.0.1:40037 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:24:44,645 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:24:44,645 WARN [BP-1780146639-172.17.0.3-1733714581617 heartbeating to localhost/127.0.0.1:40037 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780146639-172.17.0.3-1733714581617 (Datanode Uuid 4cab0383-56dc-4838-a75a-d5e3d05c60e6) service to localhost/127.0.0.1:40037 2024-12-09T03:24:44,645 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:24:44,646 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data3/current/BP-1780146639-172.17.0.3-1733714581617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:44,646 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data4/current/BP-1780146639-172.17.0.3-1733714581617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:44,647 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:24:44,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f93babe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:44,650 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:44,650 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:44,650 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:44,650 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:44,651 WARN [BP-1780146639-172.17.0.3-1733714581617 heartbeating to localhost/127.0.0.1:40037 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:24:44,651 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:24:44,651 WARN [BP-1780146639-172.17.0.3-1733714581617 heartbeating to localhost/127.0.0.1:40037 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1780146639-172.17.0.3-1733714581617 (Datanode Uuid 7c6efea1-15c1-40fa-87b3-8c409a3eddb6) service to localhost/127.0.0.1:40037 2024-12-09T03:24:44,651 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:24:44,652 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data1/current/BP-1780146639-172.17.0.3-1733714581617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:44,652 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/cluster_2d046876-6d08-f417-8711-e69390dcac85/data/data2/current/BP-1780146639-172.17.0.3-1733714581617 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:44,653 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:24:44,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:24:44,661 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:44,661 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:44,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:44,661 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:44,669 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:24:44,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:24:44,704 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40037 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:40037 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/1617b0b1421f:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@11446de8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/1617b0b1421f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:40037 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Client (874278612) connection to localhost/127.0.0.1:40037 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:40037 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40037 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/1617b0b1421f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40037 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40037 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=144 (was 241), ProcessCount=11 (was 11), AvailableMemoryMB=6314 (was 6092) - AvailableMemoryMB LEAK? - 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=144, ProcessCount=11, AvailableMemoryMB=6313 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.log.dir so I do NOT create it in target/test-data/81d2985b-abce-2401-771d-315b23e3922e 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a5b53dae-6bd8-159f-78cb-4d1c287394f1/hadoop.tmp.dir so I do NOT create it in target/test-data/81d2985b-abce-2401-771d-315b23e3922e 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29, deleteOnExit=true 2024-12-09T03:24:44,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/test.cache.data in system properties and HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir in system properties and 
HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:24:44,711 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:24:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/dfs.journalnode.edits.dir in 
system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:24:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:24:44,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:24:44,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:24:44,725 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:24:45,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:45,095 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:45,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:45,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:45,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:24:45,097 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:45,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4340a53c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:45,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c8f0dfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:45,190 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b839c20{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/java.io.tmpdir/jetty-localhost-36469-hadoop-hdfs-3_4_1-tests_jar-_-any-3695367672262048206/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:24:45,191 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d00b522{HTTP/1.1, (http/1.1)}{localhost:36469} 2024-12-09T03:24:45,191 INFO [Time-limited test {}] server.Server(415): Started @105441ms 2024-12-09T03:24:45,203 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:24:45,528 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:45,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:45,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:45,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:45,534 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:24:45,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f754f75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:45,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a779{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:45,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12742a74{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/java.io.tmpdir/jetty-localhost-44227-hadoop-hdfs-3_4_1-tests_jar-_-any-14304071407101486946/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:45,626 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f1e2b4c{HTTP/1.1, (http/1.1)}{localhost:44227} 2024-12-09T03:24:45,626 INFO [Time-limited test {}] server.Server(415): Started @105876ms 2024-12-09T03:24:45,628 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:24:45,658 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:45,661 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:45,662 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:45,662 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:45,662 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:24:45,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@592c88e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:45,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5ed954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:45,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62c9cc57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/java.io.tmpdir/jetty-localhost-35359-hadoop-hdfs-3_4_1-tests_jar-_-any-16564901311846869440/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:45,752 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3756c399{HTTP/1.1, (http/1.1)}{localhost:35359} 2024-12-09T03:24:45,752 INFO [Time-limited test {}] server.Server(415): Started @106002ms 2024-12-09T03:24:45,754 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
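[Illustrative sketch, not part of the captured run] The HBaseTestingUtil lines above show the per-test data directory being wired into HDFS/Jetty settings and the embedded NameNode and DataNode web servers coming up. A test that produces this kind of startup is typically driven by the public mini-cluster API sketched below; `startMiniCluster()`/`shutdownMiniCluster()` are the real entry points, while the test class name and body are assumptions made only for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

// Sketch only: brings up the same kind of in-process ZooKeeper + HDFS + HBase
// cluster whose Jetty/DataNode startup is visible in the log above.
public class MiniClusterSketchTest {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // Test-specific settings would be applied to conf here, before startup,
    // mirroring the "Setting ... in system properties and HBase conf" lines.
    UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster(); // stops the cluster and removes the test data dir
  }

  @Test
  public void clusterIsUp() throws Exception {
    // Works once the master startup shown later in the log has completed.
    UTIL.getAdmin().listTableDescriptors();
  }
}
```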
2024-12-09T03:24:46,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:24:46,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:24:46,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T03:24:46,751 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T03:24:47,141 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data1/current/BP-533771429-172.17.0.3-1733714684736/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:47,141 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data2/current/BP-533771429-172.17.0.3-1733714684736/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:47,162 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:24:47,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x182e13fa057c56e4 with lease ID 0xf474b2198218732b: Processing first storage report for DS-92fd5051-3f6b-40ca-ae0e-7ec795fbe8f2 from datanode DatanodeRegistration(127.0.0.1:44087, datanodeUuid=94a5beb5-168f-478e-bdf5-305bb2b7d7e5, infoPort=43361, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736) 2024-12-09T03:24:47,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x182e13fa057c56e4 with lease ID 0xf474b2198218732b: from storage DS-92fd5051-3f6b-40ca-ae0e-7ec795fbe8f2 node DatanodeRegistration(127.0.0.1:44087, datanodeUuid=94a5beb5-168f-478e-bdf5-305bb2b7d7e5, infoPort=43361, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:24:47,165 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x182e13fa057c56e4 with lease ID 0xf474b2198218732b: Processing first storage report for DS-5af014bb-fb5d-4c5d-b6e0-5b77a189d7c4 from datanode DatanodeRegistration(127.0.0.1:44087, datanodeUuid=94a5beb5-168f-478e-bdf5-305bb2b7d7e5, infoPort=43361, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736) 2024-12-09T03:24:47,165 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x182e13fa057c56e4 with lease ID 0xf474b2198218732b: from storage DS-5af014bb-fb5d-4c5d-b6e0-5b77a189d7c4 node DatanodeRegistration(127.0.0.1:44087, datanodeUuid=94a5beb5-168f-478e-bdf5-305bb2b7d7e5, infoPort=43361, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:47,182 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data3/current/BP-533771429-172.17.0.3-1733714684736/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:47,182 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data4/current/BP-533771429-172.17.0.3-1733714684736/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:47,199 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:24:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bf6b25b92a5a751 with lease ID 0xf474b2198218732c: Processing first storage report for DS-09008a8d-091c-4490-ba7c-d81706fb8e9f from datanode DatanodeRegistration(127.0.0.1:42215, datanodeUuid=6f401ca3-313a-4c75-a094-b543eddd3316, infoPort=43613, infoSecurePort=0, ipcPort=38965, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736) 2024-12-09T03:24:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bf6b25b92a5a751 with lease ID 0xf474b2198218732c: from storage DS-09008a8d-091c-4490-ba7c-d81706fb8e9f node DatanodeRegistration(127.0.0.1:42215, datanodeUuid=6f401ca3-313a-4c75-a094-b543eddd3316, infoPort=43613, infoSecurePort=0, ipcPort=38965, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bf6b25b92a5a751 with lease ID 0xf474b2198218732c: Processing first storage report for DS-ab9ff768-0d00-46e8-838e-dc65960c719f from datanode DatanodeRegistration(127.0.0.1:42215, datanodeUuid=6f401ca3-313a-4c75-a094-b543eddd3316, infoPort=43613, infoSecurePort=0, ipcPort=38965, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736) 2024-12-09T03:24:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bf6b25b92a5a751 with lease ID 0xf474b2198218732c: from storage DS-ab9ff768-0d00-46e8-838e-dc65960c719f node DatanodeRegistration(127.0.0.1:42215, datanodeUuid=6f401ca3-313a-4c75-a094-b543eddd3316, infoPort=43613, infoSecurePort=0, ipcPort=38965, storageInfo=lv=-57;cid=testClusterID;nsid=1500818035;c=1733714684736), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:47,304 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e 2024-12-09T03:24:47,308 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/zookeeper_0, clientPort=50017, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:24:47,308 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50017 2024-12-09T03:24:47,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,310 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:24:47,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:24:47,322 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d with version=8 2024-12-09T03:24:47,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:24:47,325 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:24:47,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,325 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:24:47,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,325 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:24:47,326 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:24:47,326 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:24:47,327 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44773 2024-12-09T03:24:47,328 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44773 connecting to ZooKeeper ensemble=127.0.0.1:50017 2024-12-09T03:24:47,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:447730x0, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:24:47,404 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44773-0x100089abc120000 connected 2024-12-09T03:24:47,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,508 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:47,511 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d, hbase.cluster.distributed=false 2024-12-09T03:24:47,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:24:47,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44773 2024-12-09T03:24:47,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44773 2024-12-09T03:24:47,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44773 2024-12-09T03:24:47,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44773 2024-12-09T03:24:47,515 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44773 2024-12-09T03:24:47,531 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:24:47,531 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:24:47,532 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35041 2024-12-09T03:24:47,533 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35041 connecting to ZooKeeper ensemble=127.0.0.1:50017 2024-12-09T03:24:47,534 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350410x0, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:24:47,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350410x0, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:47,549 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35041-0x100089abc120001 connected 2024-12-09T03:24:47,550 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:24:47,550 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:24:47,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:24:47,552 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:24:47,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35041 2024-12-09T03:24:47,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35041 2024-12-09T03:24:47,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35041 2024-12-09T03:24:47,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35041 2024-12-09T03:24:47,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35041 2024-12-09T03:24:47,575 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:44773 2024-12-09T03:24:47,575 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:47,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:47,585 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:24:47,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,595 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:24:47,596 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,44773,1733714687325 from backup master directory 2024-12-09T03:24:47,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:47,605 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
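[Illustrative sketch, not part of the captured run] The ZKWatcher/ZKUtil lines above record watches being set on znodes such as /hbase/master and /hbase/backup-masters (including on znodes that do not exist yet) and the resulting NodeCreated/NodeChildrenChanged events. The mechanism can be shown with the stock ZooKeeper client rather than HBase's internal helpers; the connect string and path are taken from the log, everything else is an assumption.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch: register a watch on /hbase/master the way the ZKWatcher
// lines above do, using the plain ZooKeeper client.
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50017", 30_000, event -> {
      // Fires for connection state changes and for watched znode events.
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/master".equals(event.getPath())) {
        created.countDown();
      }
    });
    // exists() with watch=true sets a one-shot watch even if the znode is
    // absent ("Set watcher on znode that does not yet exist").
    zk.exists("/hbase/master", true);
    created.await();
    zk.close();
  }
}
```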
2024-12-09T03:24:47,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:47,605 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,610 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/hbase.id] with ID: 7c41c318-8439-42c4-a38c-bea1eac43988 2024-12-09T03:24:47,610 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/.tmp/hbase.id 2024-12-09T03:24:47,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:24:47,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:24:47,617 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/.tmp/hbase.id]:[hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/hbase.id] 2024-12-09T03:24:47,630 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:47,630 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:24:47,632 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T03:24:47,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:24:47,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:24:47,651 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:24:47,652 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:24:47,653 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:47,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:24:47,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:24:47,665 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store 2024-12-09T03:24:47,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:24:47,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:24:47,675 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:47,675 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:24:47,675 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:47,675 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:47,675 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:24:47,675 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:47,676 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
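[Illustrative sketch, not part of the captured run] The master:store descriptor logged above defines four column families (info, proc, rs, state) with explicit versions, block size, bloom filter, encoding and in-memory settings. For illustration, a descriptor with that shape can be assembled with the public client builders; the values are copied from the log, but the builder chain is only an assumed way to construct such a descriptor, not how MasterRegion actually does it.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a descriptor shaped like the 'master:store' table in the log.
public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)                        // BLOCKSIZE => '8192 B (8KB)'
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();                                      // 'rs' and 'state' follow the same pattern
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```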
2024-12-09T03:24:47,676 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714687675Disabling compacts and flushes for region at 1733714687675Disabling writes for close at 1733714687675Writing region close event to WAL at 1733714687675Closed at 1733714687675 2024-12-09T03:24:47,677 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/.initializing 2024-12-09T03:24:47,677 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/WALs/1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,681 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C44773%2C1733714687325, suffix=, logDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/WALs/1617b0b1421f,44773,1733714687325, archiveDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/oldWALs, maxLogs=10 2024-12-09T03:24:47,682 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C44773%2C1733714687325.1733714687681 2024-12-09T03:24:47,687 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/WALs/1617b0b1421f,44773,1733714687325/1617b0b1421f%2C44773%2C1733714687325.1733714687681 2024-12-09T03:24:47,688 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43361:43361),(127.0.0.1/127.0.0.1:43613:43613)] 2024-12-09T03:24:47,688 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:24:47,688 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:47,689 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,689 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,691 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:24:47,693 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:47,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:24:47,695 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:47,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:24:47,698 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:47,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:24:47,701 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:47,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,702 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,702 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,704 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,704 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,704 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:24:47,706 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:47,708 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:24:47,708 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749864, jitterRate=-0.04649871587753296}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:24:47,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714687689Initializing all the Stores at 1733714687690 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714687690Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714687691 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714687691Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714687691Cleaning up temporary data from old regions at 1733714687704 (+13 ms)Region opened successfully at 1733714687709 (+5 ms) 2024-12-09T03:24:47,710 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:24:47,714 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490b1fd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:24:47,714 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:24:47,715 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:24:47,715 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:24:47,715 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:24:47,715 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:24:47,716 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:24:47,716 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:24:47,719 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:24:47,719 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:24:47,731 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:24:47,732 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:24:47,732 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:24:47,742 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:24:47,742 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:24:47,744 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:24:47,752 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:24:47,754 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:24:47,763 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:24:47,768 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:24:47,780 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:24:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,793 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,44773,1733714687325, sessionid=0x100089abc120000, setting cluster-up flag (Was=false) 2024-12-09T03:24:47,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,848 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:24:47,853 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:47,911 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:24:47,915 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,44773,1733714687325 2024-12-09T03:24:47,919 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:24:47,922 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:47,922 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:24:47,923 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:24:47,923 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,44773,1733714687325 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:24:47,927 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:47,927 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:47,927 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:47,927 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:47,928 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:24:47,928 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:47,928 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:24:47,928 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714717929 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:24:47,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:24:47,930 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:24:47,930 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:24:47,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:24:47,931 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714687930,5,FailOnTimeoutGroup] 2024-12-09T03:24:47,931 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714687931,5,FailOnTimeoutGroup] 2024-12-09T03:24:47,931 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:47,931 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:24:47,931 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:47,931 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:47,931 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,931 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:24:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:24:47,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:24:47,939 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:24:47,939 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d 2024-12-09T03:24:47,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:24:47,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:24:47,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:47,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:24:47,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:24:47,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:47,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:24:47,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:24:47,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:47,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:24:47,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:24:47,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:47,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:24:47,959 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:24:47,959 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:47,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:47,960 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:24:47,960 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(746): ClusterId : 7c41c318-8439-42c4-a38c-bea1eac43988 2024-12-09T03:24:47,960 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:24:47,960 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740 2024-12-09T03:24:47,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740 2024-12-09T03:24:47,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:24:47,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:24:47,963 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
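The FSTableDescriptors and StoreOpener entries above dump the hbase:meta descriptor with its four column families (info, ns, rep_barrier, table), each using VERSIONS=3 or MAX_INT, ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching, and an 8 KB or 64 KB block size. As a point of reference, a minimal sketch of building an equivalent descriptor through the public client API; the table name is a placeholder and this only mirrors values from the log, it is not how InitMetaProcedure itself constructs hbase:meta.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the family settings printed above: VERSIONS=3, BLOOMFILTER=ROWCOL,
    // IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // Placeholder table name; hbase:meta itself is created internally by the master.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("exampleMetaLike"))
        .setColumnFamily(info)
        .build();
  }
}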
2024-12-09T03:24:47,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:24:47,966 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:24:47,967 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775872, jitterRate=-0.013427749276161194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714687948Initializing all the Stores at 1733714687949 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714687949Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714687949Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714687949Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714687949Cleaning up temporary data from old regions at 1733714687962 (+13 ms)Region opened successfully at 1733714687967 (+5 ms) 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:24:47,968 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:24:47,968 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:47,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714687968Disabling compacts and flushes for region at 1733714687968Disabling writes for close at 1733714687968Writing region 
close event to WAL at 1733714687968Closed at 1733714687968 2024-12-09T03:24:47,970 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:47,970 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:24:47,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:24:47,970 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:24:47,971 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:24:47,972 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:24:47,973 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:24:47,981 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:24:47,982 DEBUG [RS:0;1617b0b1421f:35041 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc1f0d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:24:47,993 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:35041 2024-12-09T03:24:47,993 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:24:47,993 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:24:47,993 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(832): About to register with Master. 
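The AbstractRpcClient line above reports connectTO=10000, readTO=20000, writeTO=60000 for the region server's connection to RegionServerStatusService. Those values correspond to the RPC socket timeout settings; a hedged sketch of tuning them through the client Configuration follows. The property names are given from memory and should be verified against the RpcClient constants for this HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcClientTimeoutSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys, matching the 10000/20000/60000 ms defaults seen in the log line;
    // check org.apache.hadoop.hbase.ipc.RpcClient for the exact names in your release.
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);
    conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);
    conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
    return conf;
  }
}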
2024-12-09T03:24:47,994 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,44773,1733714687325 with port=35041, startcode=1733714687530 2024-12-09T03:24:47,994 DEBUG [RS:0;1617b0b1421f:35041 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:24:47,997 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:24:47,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44773 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,35041,1733714687530 2024-12-09T03:24:47,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44773 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,000 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d 2024-12-09T03:24:48,000 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46167 2024-12-09T03:24:48,000 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:24:48,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:24:48,012 DEBUG [RS:0;1617b0b1421f:35041 {}] zookeeper.ZKUtil(111): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,012 WARN [RS:0;1617b0b1421f:35041 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:24:48,012 INFO [RS:0;1617b0b1421f:35041 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:48,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,35041,1733714687530] 2024-12-09T03:24:48,012 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/WALs/1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,016 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:24:48,021 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:24:48,022 INFO [RS:0;1617b0b1421f:35041 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:24:48,022 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
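The entries above show the register-with-master handshake ending with the region server's ephemeral znode appearing under /hbase/rs, and RegionServerTracker on the master reacting to the NodeChildrenChanged event delivered through ZKWatcher. A small standalone sketch of the same membership pattern using the plain ZooKeeper client; the quorum address and paths are placeholders copied from the log and assume a running ZooKeeper with /hbase/rs present.

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralMemberSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address as reported in the log; any reachable ZooKeeper works.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50017", 30_000, (WatchedEvent e) ->
        System.out.println("Event " + e.getType() + " on " + e.getPath()));

    // Like RegionServerTracker: watch the membership parent for child changes.
    zk.getChildren("/hbase/rs", true);

    // Like the region server: advertise membership with an ephemeral znode that
    // disappears automatically when the session dies, which is how crashes are noticed.
    zk.create("/hbase/rs/example-host,35041,0",
        "up".getBytes(StandardCharsets.UTF_8),
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    Thread.sleep(5_000);
    zk.close();
  }
}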
2024-12-09T03:24:48,022 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:24:48,024 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:24:48,024 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,024 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,024 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,024 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:48,025 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:48,026 DEBUG [RS:0;1617b0b1421f:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:48,026 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
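Each "Starting executor service ... corePoolSize=..., maxPoolSize=..." line above describes an event-handler pool dedicated to one event type (open region, close region, log replay, snapshot, and so on). The following is only a plain java.util.concurrent sketch of what those two parameters mean, not the HBase ExecutorService class itself.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventPoolSketch {
  public static void main(String[] args) {
    // corePoolSize = threads kept ready; maximumPoolSize = the bound the pool may
    // grow to when the queue is full (mirrors corePoolSize/maxPoolSize above).
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

    for (int i = 0; i < 3; i++) {
      final int regionId = i;
      openRegionPool.execute(() ->
          System.out.println("handling open-region event " + regionId));
    }
    openRegionPool.shutdown();
  }
}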
2024-12-09T03:24:48,026 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,026 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,026 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,026 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,027 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,35041,1733714687530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:24:48,040 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:24:48,040 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,35041,1733714687530-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,040 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,041 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.Replication(171): 1617b0b1421f,35041,1733714687530 started 2024-12-09T03:24:48,054 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,054 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,35041,1733714687530, RpcServer on 1617b0b1421f/172.17.0.3:35041, sessionid=0x100089abc120001 2024-12-09T03:24:48,054 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:24:48,054 DEBUG [RS:0;1617b0b1421f:35041 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,054 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,35041,1733714687530' 2024-12-09T03:24:48,054 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:24:48,055 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:24:48,056 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:24:48,056 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:24:48,056 DEBUG [RS:0;1617b0b1421f:35041 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,056 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,35041,1733714687530' 2024-12-09T03:24:48,056 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:24:48,056 DEBUG 
[RS:0;1617b0b1421f:35041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:24:48,057 DEBUG [RS:0;1617b0b1421f:35041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:24:48,057 INFO [RS:0;1617b0b1421f:35041 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:24:48,057 INFO [RS:0;1617b0b1421f:35041 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:24:48,124 WARN [1617b0b1421f:44773 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:24:48,161 INFO [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C35041%2C1733714687530, suffix=, logDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/WALs/1617b0b1421f,35041,1733714687530, archiveDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/oldWALs, maxLogs=32 2024-12-09T03:24:48,165 INFO [RS:0;1617b0b1421f:35041 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35041%2C1733714687530.1733714688164 2024-12-09T03:24:48,172 INFO [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/WALs/1617b0b1421f,35041,1733714687530/1617b0b1421f%2C35041%2C1733714687530.1733714688164 2024-12-09T03:24:48,174 DEBUG [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43361:43361),(127.0.0.1/127.0.0.1:43613:43613)] 2024-12-09T03:24:48,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:48,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:48,374 DEBUG [1617b0b1421f:44773 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:24:48,375 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,377 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,35041,1733714687530, state=OPENING 2024-12-09T03:24:48,391 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:24:48,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:48,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:48,482 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:48,482 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:48,482 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:24:48,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,35041,1733714687530}] 2024-12-09T03:24:48,639 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:24:48,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49295, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:24:48,653 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:24:48,653 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:48,657 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C35041%2C1733714687530.meta, suffix=.meta, logDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/WALs/1617b0b1421f,35041,1733714687530, archiveDir=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/oldWALs, maxLogs=32 2024-12-09T03:24:48,661 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 
1617b0b1421f%2C35041%2C1733714687530.meta.1733714688660.meta 2024-12-09T03:24:48,669 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/WALs/1617b0b1421f,35041,1733714687530/1617b0b1421f%2C35041%2C1733714687530.meta.1733714688660.meta 2024-12-09T03:24:48,670 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43361:43361),(127.0.0.1/127.0.0.1:43613:43613)] 2024-12-09T03:24:48,671 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:24:48,672 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:24:48,672 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:24:48,672 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T03:24:48,672 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:24:48,672 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:48,673 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:24:48,673 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:24:48,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:24:48,676 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
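The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above are printed by AbstractFSWAL when the FSHLog provider is instantiated, first for the region server's default WAL and then for the hbase:meta WAL. A hedged sketch of the Configuration knobs behind those numbers; the property names are from memory and the 0.5 roll multiplier (256 MB x 0.5 = 128 MB) is the usual default, so verify both against your release.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: WAL block size, roll threshold as a fraction of the block size,
    // and how many un-archived WAL files a region server may accumulate.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}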
2024-12-09T03:24:48,676 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:48,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:48,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:24:48,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:24:48,678 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:48,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:48,679 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:24:48,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:24:48,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:48,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:48,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:24:48,682 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:24:48,682 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:48,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:48,683 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:24:48,685 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740 2024-12-09T03:24:48,687 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740 2024-12-09T03:24:48,688 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:24:48,688 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:24:48,689 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
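FlushLargeStoresPolicy notes above that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor, so it falls back to the memstore flush size divided by the number of families (16 MB here). For a user table the property can be carried in the table descriptor; a hedged sketch using TableDescriptorBuilder.setValue follows. The table name is a placeholder, and whether the policy reads a descriptor-level value in your version should be confirmed against FlushLargeStoresPolicy.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static TableDescriptor build() {
    // Let a single column family flush once its memstore exceeds 16 MB,
    // instead of always flushing every family together.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}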
2024-12-09T03:24:48,691 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:24:48,692 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791122, jitterRate=0.005964189767837524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:24:48,692 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:24:48,693 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714688673Writing region info on filesystem at 1733714688673Initializing all the Stores at 1733714688674 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714688674Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714688675 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714688675Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714688675Cleaning up temporary data from old regions at 1733714688688 (+13 ms)Running coprocessor post-open hooks at 1733714688692 (+4 ms)Region opened successfully at 1733714688693 (+1 ms) 2024-12-09T03:24:48,694 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714688638 2024-12-09T03:24:48,697 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:24:48,698 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:24:48,698 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,700 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,35041,1733714687530, state=OPEN 2024-12-09T03:24:48,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:24:48,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:24:48,742 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,742 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:48,742 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:48,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:24:48,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,35041,1733714687530 in 260 msec 2024-12-09T03:24:48,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:24:48,750 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 776 msec 2024-12-09T03:24:48,751 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:48,751 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:24:48,753 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:24:48,753 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,35041,1733714687530, seqNum=-1] 2024-12-09T03:24:48,753 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:24:48,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46343, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:24:48,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 841 msec 2024-12-09T03:24:48,763 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714688763, completionTime=-1 2024-12-09T03:24:48,763 INFO 
[master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:24:48,763 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:24:48,765 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:24:48,765 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714748765 2024-12-09T03:24:48,765 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714808765 2024-12-09T03:24:48,765 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:44773, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,766 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,768 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.166sec 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
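Many of the INFO lines in this section register ScheduledChore instances (LogsCleaner, HFileCleaner, CompactionChecker, BalancerChore, CatalogJanitor, and so on) with a ChoreService that runs them at a fixed period. A minimal sketch of the same pattern with a custom chore; the constructor shown (name, stopper, period in milliseconds) is the API as I recall it, so treat the exact signatures as assumptions.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  /** Trivially satisfies the Stoppable contract chores consult to know when to quit. */
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new SimpleStopper();
    ChoreService service = new ChoreService("example");

    // Runs every 1000 ms, like the CompactionChecker/MemstoreFlusherChore lines above.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work");
      }
    };
    service.scheduleChore(chore);

    Thread.sleep(3_000);
    stopper.stop("done");
    service.shutdown();
  }
}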
2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:24:48,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:24:48,774 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:24:48,774 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:24:48,775 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44773,1733714687325-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:48,782 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:24:48,786 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:48,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:48,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f08fec3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:48,861 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,44773,-1 for getting cluster id 2024-12-09T03:24:48,861 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:24:48,863 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7c41c318-8439-42c4-a38c-bea1eac43988' 2024-12-09T03:24:48,863 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:24:48,863 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7c41c318-8439-42c4-a38c-bea1eac43988" 2024-12-09T03:24:48,864 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ba47445, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:48,864 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,44773,-1] 2024-12-09T03:24:48,864 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 
2024-12-09T03:24:48,865 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:48,867 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60940, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:24:48,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb2e54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:48,868 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:24:48,869 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,35041,1733714687530, seqNum=-1] 2024-12-09T03:24:48,870 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:24:48,872 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32964, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:24:48,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,44773,1733714687325 2024-12-09T03:24:48,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:48,877 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:24:48,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:24:48,878 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:24:48,878 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:48,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:48,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:48,878 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T03:24:48,878 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:24:48,878 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=650663950, stopped=false 2024-12-09T03:24:48,878 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,44773,1733714687325 2024-12-09T03:24:48,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:48,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:48,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:48,900 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:24:48,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:48,900 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:24:48,900 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:48,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:48,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:48,901 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,35041,1733714687530' ***** 2024-12-09T03:24:48,901 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:24:48,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:24:48,901 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,35041,1733714687530 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:24:48,901 INFO [RS:0;1617b0b1421f:35041 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:35041. 
2024-12-09T03:24:48,901 DEBUG [RS:0;1617b0b1421f:35041 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:24:48,902 DEBUG [RS:0;1617b0b1421f:35041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:48,902 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:24:48,902 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:24:48,902 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T03:24:48,902 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:24:48,902 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T03:24:48,902 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:24:48,902 DEBUG [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T03:24:48,902 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:24:48,902 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:24:48,902 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:24:48,902 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:24:48,902 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:24:48,903 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T03:24:48,918 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/.tmp/ns/28f37ceb51e7417f8de4f92f39c24bd2 is 43, key is default/ns:d/1733714688756/Put/seqid=0 2024-12-09T03:24:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741835_1011 (size=5153) 2024-12-09T03:24:48,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741835_1011 (size=5153) 2024-12-09T03:24:48,923 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/.tmp/ns/28f37ceb51e7417f8de4f92f39c24bd2 2024-12-09T03:24:48,932 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/.tmp/ns/28f37ceb51e7417f8de4f92f39c24bd2 as hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/ns/28f37ceb51e7417f8de4f92f39c24bd2 2024-12-09T03:24:48,939 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/ns/28f37ceb51e7417f8de4f92f39c24bd2, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T03:24:48,940 INFO 
[RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-12-09T03:24:48,940 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:24:48,945 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T03:24:48,946 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:24:48,946 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:48,946 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714688902Running coprocessor pre-close hooks at 1733714688902Disabling compacts and flushes for region at 1733714688902Disabling writes for close at 1733714688902Obtaining lock to block concurrent updates at 1733714688903 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733714688903Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733714688903Flushing stores of hbase:meta,,1.1588230740 at 1733714688904 (+1 ms)Flushing 1588230740/ns: creating writer at 1733714688904Flushing 1588230740/ns: appending metadata at 1733714688917 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733714688917Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@624f39d: reopening flushed file at 1733714688931 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1733714688940 (+9 ms)Writing region close event to WAL at 1733714688941 (+1 ms)Running coprocessor post-close hooks at 1733714688946 (+5 ms)Closed at 1733714688946 2024-12-09T03:24:48,946 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:49,033 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:24:49,033 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:24:49,102 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,35041,1733714687530; all regions closed. 
2024-12-09T03:24:49,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741834_1010 (size=1152) 2024-12-09T03:24:49,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741834_1010 (size=1152) 2024-12-09T03:24:49,509 DEBUG [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/oldWALs 2024-12-09T03:24:49,509 INFO [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C35041%2C1733714687530.meta:.meta(num 1733714688660) 2024-12-09T03:24:49,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,510 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,510 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,510 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741833_1009 (size=93) 2024-12-09T03:24:49,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741833_1009 (size=93) 2024-12-09T03:24:49,515 DEBUG [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/oldWALs 2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C35041%2C1733714687530:(num 1733714688164) 2024-12-09T03:24:49,515 DEBUG [RS:0;1617b0b1421f:35041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:24:49,515 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:24:49,515 INFO [RS:0;1617b0b1421f:35041 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35041 2024-12-09T03:24:49,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,35041,1733714687530 2024-12-09T03:24:49,538 INFO [RS:0;1617b0b1421f:35041 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:24:49,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:24:49,548 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,35041,1733714687530] 2024-12-09T03:24:49,559 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,35041,1733714687530 already deleted, retry=false 2024-12-09T03:24:49,559 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,35041,1733714687530 expired; onlineServers=0 2024-12-09T03:24:49,559 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,44773,1733714687325' ***** 2024-12-09T03:24:49,559 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:24:49,559 INFO [M:0;1617b0b1421f:44773 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:24:49,559 INFO [M:0;1617b0b1421f:44773 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:24:49,559 DEBUG [M:0;1617b0b1421f:44773 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:24:49,559 DEBUG [M:0;1617b0b1421f:44773 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:24:49,559 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:24:49,559 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714687930 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714687930,5,FailOnTimeoutGroup] 2024-12-09T03:24:49,559 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714687931 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714687931,5,FailOnTimeoutGroup] 2024-12-09T03:24:49,560 INFO [M:0;1617b0b1421f:44773 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:24:49,560 INFO [M:0;1617b0b1421f:44773 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:24:49,560 DEBUG [M:0;1617b0b1421f:44773 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:24:49,560 INFO [M:0;1617b0b1421f:44773 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:24:49,560 INFO [M:0;1617b0b1421f:44773 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:24:49,560 INFO [M:0;1617b0b1421f:44773 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:24:49,560 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:24:49,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:24:49,570 DEBUG [M:0;1617b0b1421f:44773 {}] zookeeper.ZKUtil(347): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:24:49,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:49,570 WARN [M:0;1617b0b1421f:44773 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:24:49,570 INFO [M:0;1617b0b1421f:44773 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/.lastflushedseqids 2024-12-09T03:24:49,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741836_1012 (size=99) 2024-12-09T03:24:49,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741836_1012 (size=99) 2024-12-09T03:24:49,577 INFO [M:0;1617b0b1421f:44773 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:24:49,577 INFO [M:0;1617b0b1421f:44773 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:24:49,577 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:24:49,577 INFO [M:0;1617b0b1421f:44773 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:49,577 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:49,578 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-12-09T03:24:49,578 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:49,578 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T03:24:49,593 DEBUG [M:0;1617b0b1421f:44773 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e70af6f07d4e415686f8c8b1fda91793 is 82, key is hbase:meta,,1/info:regioninfo/1733714688698/Put/seqid=0 2024-12-09T03:24:49,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741837_1013 (size=5672) 2024-12-09T03:24:49,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741837_1013 (size=5672) 2024-12-09T03:24:49,600 INFO [M:0;1617b0b1421f:44773 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e70af6f07d4e415686f8c8b1fda91793 2024-12-09T03:24:49,619 DEBUG [M:0;1617b0b1421f:44773 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82408cb46e914c4a8c580e9ff4647ba0 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733714688762/Put/seqid=0 2024-12-09T03:24:49,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741838_1014 (size=5275) 2024-12-09T03:24:49,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741838_1014 (size=5275) 2024-12-09T03:24:49,624 INFO [M:0;1617b0b1421f:44773 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82408cb46e914c4a8c580e9ff4647ba0 2024-12-09T03:24:49,647 DEBUG [M:0;1617b0b1421f:44773 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dcc7358ff03d4a42b9f80497ed6b7c33 is 69, key is 1617b0b1421f,35041,1733714687530/rs:state/1733714687998/Put/seqid=0 2024-12-09T03:24:49,648 INFO [RS:0;1617b0b1421f:35041 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:24:49,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:49,649 INFO [RS:0;1617b0b1421f:35041 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,35041,1733714687530; zookeeper connection closed. 2024-12-09T03:24:49,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x100089abc120001, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:49,649 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bc187f6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bc187f6 2024-12-09T03:24:49,649 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:24:49,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741839_1015 (size=5156) 2024-12-09T03:24:49,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741839_1015 (size=5156) 2024-12-09T03:24:49,652 INFO [M:0;1617b0b1421f:44773 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dcc7358ff03d4a42b9f80497ed6b7c33 2024-12-09T03:24:49,674 DEBUG [M:0;1617b0b1421f:44773 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a2c611a452f47ffab85b4c24de7289e is 52, key is load_balancer_on/state:d/1733714688876/Put/seqid=0 2024-12-09T03:24:49,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741840_1016 (size=5056) 2024-12-09T03:24:49,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741840_1016 (size=5056) 2024-12-09T03:24:49,680 INFO [M:0;1617b0b1421f:44773 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a2c611a452f47ffab85b4c24de7289e 2024-12-09T03:24:49,687 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e70af6f07d4e415686f8c8b1fda91793 as hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e70af6f07d4e415686f8c8b1fda91793 2024-12-09T03:24:49,693 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e70af6f07d4e415686f8c8b1fda91793, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T03:24:49,694 DEBUG [M:0;1617b0b1421f:44773 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/82408cb46e914c4a8c580e9ff4647ba0 as hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82408cb46e914c4a8c580e9ff4647ba0 2024-12-09T03:24:49,700 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/82408cb46e914c4a8c580e9ff4647ba0, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T03:24:49,701 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dcc7358ff03d4a42b9f80497ed6b7c33 as hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dcc7358ff03d4a42b9f80497ed6b7c33 2024-12-09T03:24:49,707 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dcc7358ff03d4a42b9f80497ed6b7c33, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T03:24:49,709 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a2c611a452f47ffab85b4c24de7289e as hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a2c611a452f47ffab85b4c24de7289e 2024-12-09T03:24:49,715 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46167/user/jenkins/test-data/56cc2894-e593-fa19-09e5-1fd8a5444c4d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a2c611a452f47ffab85b4c24de7289e, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T03:24:49,717 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false 2024-12-09T03:24:49,718 INFO [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:49,718 DEBUG [M:0;1617b0b1421f:44773 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714689577Disabling compacts and flushes for region at 1733714689577Disabling writes for close at 1733714689578 (+1 ms)Obtaining lock to block concurrent updates at 1733714689578Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714689578Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733714689578Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733714689579 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714689579Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714689593 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714689593Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714689604 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714689618 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714689618Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714689630 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714689646 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714689646Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714689658 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714689673 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714689673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b0ebff0: reopening flushed file at 1733714689686 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58af6fd8: reopening flushed file at 1733714689694 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7166534f: reopening flushed file at 1733714689700 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43363c24: reopening flushed file at 1733714689707 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=29, compaction requested=false at 1733714689717 (+10 ms)Writing region close event to WAL at 1733714689718 (+1 ms)Closed at 1733714689718 2024-12-09T03:24:49,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,719 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:24:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42215 is added to blk_1073741830_1006 (size=10311) 2024-12-09T03:24:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44087 is added to blk_1073741830_1006 (size=10311) 2024-12-09T03:24:49,722 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:24:49,722 INFO [M:0;1617b0b1421f:44773 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T03:24:49,722 INFO [M:0;1617b0b1421f:44773 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44773 2024-12-09T03:24:49,722 INFO [M:0;1617b0b1421f:44773 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:24:49,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:49,856 INFO [M:0;1617b0b1421f:44773 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:24:49,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44773-0x100089abc120000, quorum=127.0.0.1:50017, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:24:49,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62c9cc57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:49,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3756c399{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:49,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:49,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5ed954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:49,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@592c88e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:49,861 WARN [BP-533771429-172.17.0.3-1733714684736 heartbeating to localhost/127.0.0.1:46167 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:24:49,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:24:49,861 WARN [BP-533771429-172.17.0.3-1733714684736 heartbeating to localhost/127.0.0.1:46167 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-533771429-172.17.0.3-1733714684736 (Datanode Uuid 6f401ca3-313a-4c75-a094-b543eddd3316) service to localhost/127.0.0.1:46167 2024-12-09T03:24:49,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:24:49,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data3/current/BP-533771429-172.17.0.3-1733714684736 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:49,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data4/current/BP-533771429-172.17.0.3-1733714684736 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:49,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:24:49,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12742a74{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:49,869 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f1e2b4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:49,869 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:49,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a779{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:49,870 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f754f75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:49,871 WARN [BP-533771429-172.17.0.3-1733714684736 heartbeating to localhost/127.0.0.1:46167 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:24:49,871 WARN [BP-533771429-172.17.0.3-1733714684736 heartbeating to localhost/127.0.0.1:46167 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-533771429-172.17.0.3-1733714684736 (Datanode Uuid 94a5beb5-168f-478e-bdf5-305bb2b7d7e5) service to localhost/127.0.0.1:46167 2024-12-09T03:24:49,872 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data1/current/BP-533771429-172.17.0.3-1733714684736 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:49,872 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/cluster_5025c1dd-e831-212e-8a9a-4158f7c64a29/data/data2/current/BP-533771429-172.17.0.3-1733714684736 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:24:49,872 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:24:49,872 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:24:49,872 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:24:49,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b839c20{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:24:49,877 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d00b522{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:24:49,878 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:24:49,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c8f0dfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:24:49,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4340a53c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir/,STOPPED} 2024-12-09T03:24:49,883 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.log.dir so I do NOT create it in target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81d2985b-abce-2401-771d-315b23e3922e/hadoop.tmp.dir so I do NOT create it in target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995, deleteOnExit=true 2024-12-09T03:24:49,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/test.cache.data in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:24:49,899 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:24:49,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:24:49,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:24:49,911 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:24:50,027 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:24:50,373 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:50,378 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:50,379 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:50,379 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:50,379 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:24:50,380 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:50,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@281d64b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:50,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37564f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:50,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58a4fc41{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-42809-hadoop-hdfs-3_4_1-tests_jar-_-any-1168079677617842807/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:24:50,471 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e92d0c5{HTTP/1.1, (http/1.1)}{localhost:42809} 2024-12-09T03:24:50,471 INFO [Time-limited test {}] server.Server(415): Started @110722ms 2024-12-09T03:24:50,483 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:24:50,882 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:50,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:50,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:50,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:50,886 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:24:50,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198a2712{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:50,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@172c9107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:50,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32694da{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-35785-hadoop-hdfs-3_4_1-tests_jar-_-any-706567256649453094/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:50,978 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@150beccb{HTTP/1.1, (http/1.1)}{localhost:35785} 2024-12-09T03:24:50,978 INFO [Time-limited test {}] server.Server(415): Started @111228ms 2024-12-09T03:24:50,980 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:24:51,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:24:51,014 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:24:51,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:24:51,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:24:51,015 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:24:51,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62c98b43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:24:51,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3372b5da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:24:51,110 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7856d523{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-35515-hadoop-hdfs-3_4_1-tests_jar-_-any-5111545352316965664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:24:51,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c24bf58{HTTP/1.1, (http/1.1)}{localhost:35515} 2024-12-09T03:24:51,110 INFO [Time-limited test {}] server.Server(415): Started @111360ms 2024-12-09T03:24:51,112 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:24:52,336 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data2/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:52,336 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data1/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:52,351 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:24:52,353 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf999099b7366ef6d with lease ID 0xfb01fff9985019c7: Processing first storage report for DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020 from datanode DatanodeRegistration(127.0.0.1:44369, datanodeUuid=47afa076-6b0a-4d9f-bd88-112849ba6ab9, infoPort=45555, infoSecurePort=0, ipcPort=36631, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:24:52,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf999099b7366ef6d with lease ID 0xfb01fff9985019c7: from storage DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020 node DatanodeRegistration(127.0.0.1:44369, datanodeUuid=47afa076-6b0a-4d9f-bd88-112849ba6ab9, infoPort=45555, infoSecurePort=0, ipcPort=36631, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:52,354 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf999099b7366ef6d with lease ID 0xfb01fff9985019c7: Processing first storage report for DS-f4d0ede5-4d2f-40d2-a268-c9726a1a4e92 from datanode DatanodeRegistration(127.0.0.1:44369, datanodeUuid=47afa076-6b0a-4d9f-bd88-112849ba6ab9, infoPort=45555, infoSecurePort=0, ipcPort=36631, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:24:52,354 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf999099b7366ef6d with lease ID 0xfb01fff9985019c7: from storage DS-f4d0ede5-4d2f-40d2-a268-c9726a1a4e92 node DatanodeRegistration(127.0.0.1:44369, datanodeUuid=47afa076-6b0a-4d9f-bd88-112849ba6ab9, infoPort=45555, infoSecurePort=0, ipcPort=36631, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:52,431 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:52,432 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:24:52,448 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:24:52,450 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc805e9866b2f368 with lease ID 0xfb01fff9985019c8: Processing first storage report for DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a from datanode DatanodeRegistration(127.0.0.1:44895, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34551, infoSecurePort=0, ipcPort=41855, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:24:52,450 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc805e9866b2f368 with lease ID 0xfb01fff9985019c8: from storage DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a node DatanodeRegistration(127.0.0.1:44895, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34551, infoSecurePort=0, ipcPort=41855, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:24:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdc805e9866b2f368 with lease ID 0xfb01fff9985019c8: Processing first storage report for DS-a96b63e4-0105-4d91-9f70-441e6d7d18e3 from datanode DatanodeRegistration(127.0.0.1:44895, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34551, infoSecurePort=0, ipcPort=41855, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:24:52,451 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdc805e9866b2f368 with lease ID 0xfb01fff9985019c8: from storage DS-a96b63e4-0105-4d91-9f70-441e6d7d18e3 node DatanodeRegistration(127.0.0.1:44895, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34551, infoSecurePort=0, ipcPort=41855, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:24:52,554 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404 2024-12-09T03:24:52,557 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/zookeeper_0, clientPort=57669, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:24:52,558 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57669 2024-12-09T03:24:52,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,561 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:24:52,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:24:52,575 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce with version=8 2024-12-09T03:24:52,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:24:52,578 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:24:52,578 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:24:52,579 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34385 2024-12-09T03:24:52,580 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34385 connecting to ZooKeeper ensemble=127.0.0.1:57669 2024-12-09T03:24:52,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343850x0, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:24:52,638 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34385-0x100089ad0930000 connected 2024-12-09T03:24:52,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,720 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:52,725 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce, hbase.cluster.distributed=false 2024-12-09T03:24:52,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:24:52,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34385 2024-12-09T03:24:52,730 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34385 2024-12-09T03:24:52,730 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34385 2024-12-09T03:24:52,730 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34385 2024-12-09T03:24:52,731 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34385 2024-12-09T03:24:52,747 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:24:52,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,747 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,748 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:24:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:52,748 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:24:52,748 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:24:52,748 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:24:52,749 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36295 2024-12-09T03:24:52,750 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36295 connecting to ZooKeeper ensemble=127.0.0.1:57669 2024-12-09T03:24:52,750 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:362950x0, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:24:52,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:362950x0, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:24:52,768 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36295-0x100089ad0930001 connected 2024-12-09T03:24:52,768 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:24:52,769 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:24:52,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:24:52,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:24:52,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36295 2024-12-09T03:24:52,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36295 2024-12-09T03:24:52,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36295 2024-12-09T03:24:52,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36295 2024-12-09T03:24:52,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36295 2024-12-09T03:24:52,786 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:34385 2024-12-09T03:24:52,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,34385,1733714692577 2024-12-09T03:24:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:52,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:52,799 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/1617b0b1421f,34385,1733714692577 2024-12-09T03:24:52,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:24:52,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,812 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:24:52,813 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,34385,1733714692577 from backup master directory 2024-12-09T03:24:52,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,34385,1733714692577 2024-12-09T03:24:52,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:52,830 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T03:24:52,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:24:52,830 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,34385,1733714692577 2024-12-09T03:24:52,835 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/hbase.id] with ID: d8f3ed79-0ba9-4ed9-9ffd-c73df333dd79 2024-12-09T03:24:52,835 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/.tmp/hbase.id 2024-12-09T03:24:52,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:24:52,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:24:52,843 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/.tmp/hbase.id]:[hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/hbase.id] 2024-12-09T03:24:52,854 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:52,854 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:24:52,856 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-09T03:24:52,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:24:52,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:24:52,872 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:24:52,873 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:24:52,873 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:24:52,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:24:52,881 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store 2024-12-09T03:24:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:24:52,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:24:52,889 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:52,889 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:24:52,890 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:52,890 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:52,890 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:24:52,890 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:24:52,890 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:24:52,890 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714692889Disabling compacts and flushes for region at 1733714692889Disabling writes for close at 1733714692890 (+1 ms)Writing region close event to WAL at 1733714692890Closed at 1733714692890 2024-12-09T03:24:52,891 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/.initializing 2024-12-09T03:24:52,891 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577 2024-12-09T03:24:52,893 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C34385%2C1733714692577, suffix=, logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577, archiveDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/oldWALs, maxLogs=10 2024-12-09T03:24:52,894 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C34385%2C1733714692577.1733714692894 2024-12-09T03:24:52,899 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 2024-12-09T03:24:52,901 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34551:34551),(127.0.0.1/127.0.0.1:45555:45555)] 2024-12-09T03:24:52,902 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:24:52,902 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:52,902 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,902 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,905 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:24:52,905 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:52,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:52,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:24:52,907 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:52,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:52,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:24:52,909 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:52,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:52,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:24:52,911 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:52,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:52,912 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,912 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,913 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,914 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,914 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,914 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:24:52,916 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:24:52,918 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:24:52,919 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731345, jitterRate=-0.0700470507144928}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:24:52,919 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714692902Initializing all the Stores at 1733714692903 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714692903Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714692903Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714692904 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714692904Cleaning up temporary data from old regions at 1733714692914 (+10 ms)Region opened successfully at 1733714692919 (+5 ms) 2024-12-09T03:24:52,920 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:24:52,924 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d49db06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:24:52,925 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:24:52,925 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:24:52,925 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:24:52,925 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:24:52,926 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:24:52,927 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:24:52,927 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:24:52,929 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:24:52,930 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:24:52,938 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:24:52,938 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:24:52,939 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:24:52,948 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:24:52,949 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:24:52,950 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:24:52,959 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:24:52,960 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:24:52,969 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:24:52,972 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:24:52,980 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:24:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:24:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:52,992 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,34385,1733714692577, sessionid=0x100089ad0930000, setting cluster-up flag (Was=false) 2024-12-09T03:24:53,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,043 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:24:53,045 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,34385,1733714692577 2024-12-09T03:24:53,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,096 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:24:53,098 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,34385,1733714692577 2024-12-09T03:24:53,100 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:24:53,103 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:53,103 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:24:53,104 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:24:53,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,34385,1733714692577 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:24:53,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T03:24:53,107 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714723107 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:24:53,108 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,109 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:24:53,109 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:24:53,109 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:24:53,109 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:53,109 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:24:53,109 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:24:53,109 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:24:53,110 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714693109,5,FailOnTimeoutGroup] 2024-12-09T03:24:53,110 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714693110,5,FailOnTimeoutGroup] 2024-12-09T03:24:53,110 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,110 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:24:53,110 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,110 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,111 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,111 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:24:53,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:24:53,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:24:53,118 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:24:53,118 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce 2024-12-09T03:24:53,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:24:53,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:24:53,125 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:53,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:24:53,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:24:53,129 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:24:53,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:24:53,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:24:53,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:24:53,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:24:53,135 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:24:53,136 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,136 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,136 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:24:53,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740 2024-12-09T03:24:53,138 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740 2024-12-09T03:24:53,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:24:53,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:24:53,140 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
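The FlushLargeStoresPolicy fallback logged just above (no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, so the lower bound becomes the region memstore flush size divided by the number of column families) is plain arithmetic. A minimal Java sketch of that computation follows; the 64 MB flush size is back-derived from the 16 MB result for the four hbase:meta families, and this is an illustration, not the actual FlushLargeStoresPolicy code.

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 64L * 1024 * 1024; // assumed: implied by the 16 MB result for 4 families
        int columnFamilies = 4;                     // info, ns, rep_barrier, table (from the log)
        long flushSizeLowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(flushSizeLowerBound);    // 16777216, matching flushSizeLowerBound in the log
        // The master local region logged earlier uses the same fallback with a 128 MB flush
        // size and 4 families (info, proc, rs, state), giving the 32.0 M figure.
      }
    }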
2024-12-09T03:24:53,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:24:53,144 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:24:53,145 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=843626, jitterRate=0.07272645831108093}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:24:53,145 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714693125Initializing all the Stores at 1733714693126 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693126Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693127 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714693127Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693127Cleaning up temporary data from old regions at 1733714693139 (+12 ms)Region opened successfully at 1733714693145 (+6 ms) 2024-12-09T03:24:53,145 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:24:53,146 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:24:53,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:24:53,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:24:53,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:24:53,146 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:24:53,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714693145Disabling compacts and flushes for region at 1733714693145Disabling writes for close at 1733714693146 (+1 
ms)Writing region close event to WAL at 1733714693146Closed at 1733714693146 2024-12-09T03:24:53,148 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:53,148 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:24:53,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:24:53,149 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:24:53,150 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:24:53,178 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(746): ClusterId : d8f3ed79-0ba9-4ed9-9ffd-c73df333dd79 2024-12-09T03:24:53,178 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:24:53,192 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:24:53,192 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:24:53,203 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:24:53,203 DEBUG [RS:0;1617b0b1421f:36295 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4af8fcc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:24:53,218 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:36295 2024-12-09T03:24:53,218 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:24:53,218 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:24:53,218 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:24:53,219 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,34385,1733714692577 with port=36295, startcode=1733714692747 2024-12-09T03:24:53,219 DEBUG [RS:0;1617b0b1421f:36295 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:24:53,221 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54535, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:24:53,222 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34385 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,222 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34385 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,223 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce 2024-12-09T03:24:53,223 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33407 2024-12-09T03:24:53,223 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:24:53,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:24:53,233 DEBUG [RS:0;1617b0b1421f:36295 {}] zookeeper.ZKUtil(111): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,233 WARN [RS:0;1617b0b1421f:36295 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:24:53,234 INFO [RS:0;1617b0b1421f:36295 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:53,234 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,36295,1733714692747] 2024-12-09T03:24:53,234 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,239 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:24:53,241 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:24:53,241 INFO [RS:0;1617b0b1421f:36295 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:24:53,241 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
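The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the stock sizing factors. A hedged back-of-the-envelope sketch, assuming the default 0.4 heap fraction and 0.95 lower-limit fraction (both assumptions, not values read from this log):

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long heapBytes = 2200L * 1024 * 1024;   // assumed heap, back-derived from 880 M / 0.4
        double globalFraction = 0.4;            // hbase.regionserver.global.memstore.size (assumed default)
        double lowerFraction = 0.95;            // hbase.regionserver.global.memstore.size.lower.limit (assumed default)
        long globalLimit = (long) (heapBytes * globalFraction); // ~880 M
        long lowMark = (long) (globalLimit * lowerFraction);    // ~836 M, matching the log
        System.out.println(globalLimit + " " + lowMark);
      }
    }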
2024-12-09T03:24:53,241 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:24:53,242 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:24:53,243 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,243 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:53,244 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:53,244 DEBUG [RS:0;1617b0b1421f:36295 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:53,244 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T03:24:53,245 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,245 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,245 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,245 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,245 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,36295,1733714692747-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:24:53,258 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:24:53,258 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,36295,1733714692747-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,258 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,258 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.Replication(171): 1617b0b1421f,36295,1733714692747 started 2024-12-09T03:24:53,270 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,270 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,36295,1733714692747, RpcServer on 1617b0b1421f/172.17.0.3:36295, sessionid=0x100089ad0930001 2024-12-09T03:24:53,270 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:24:53,271 DEBUG [RS:0;1617b0b1421f:36295 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,271 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,36295,1733714692747' 2024-12-09T03:24:53,271 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:24:53,271 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:24:53,272 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:24:53,272 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:24:53,272 DEBUG [RS:0;1617b0b1421f:36295 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,272 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,36295,1733714692747' 2024-12-09T03:24:53,272 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:24:53,272 DEBUG 
[RS:0;1617b0b1421f:36295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:24:53,273 DEBUG [RS:0;1617b0b1421f:36295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:24:53,273 INFO [RS:0;1617b0b1421f:36295 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:24:53,273 INFO [RS:0;1617b0b1421f:36295 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:24:53,301 WARN [1617b0b1421f:34385 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:24:53,378 INFO [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C36295%2C1733714692747, suffix=, logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747, archiveDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs, maxLogs=32 2024-12-09T03:24:53,380 INFO [RS:0;1617b0b1421f:36295 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714693380 2024-12-09T03:24:53,389 INFO [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 2024-12-09T03:24:53,391 DEBUG [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45555:45555),(127.0.0.1/127.0.0.1:34551:34551)] 2024-12-09T03:24:53,551 DEBUG [1617b0b1421f:34385 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:24:53,553 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,557 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,36295,1733714692747, state=OPENING 2024-12-09T03:24:53,610 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:24:53,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:24:53,649 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:53,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:53,650 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:24:53,650 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,36295,1733714692747}] 2024-12-09T03:24:53,804 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:24:53,811 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:24:53,816 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:24:53,816 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:53,819 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C36295%2C1733714692747.meta, suffix=.meta, logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747, archiveDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs, maxLogs=32 2024-12-09T03:24:53,820 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta 2024-12-09T03:24:53,826 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta 2024-12-09T03:24:53,828 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45555:45555),(127.0.0.1/127.0.0.1:34551:34551)] 2024-12-09T03:24:53,829 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:24:53,829 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:24:53,829 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:24:53,830 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
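The meta descriptor logged above carries the MultiRowMutationEndpoint coprocessor and an in-memory 'info' family. A hedged sketch of how such a descriptor is declared through the public client builder API, using a hypothetical table name; this is not the actual InitMetaProcedure/FSTableDescriptors code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      static TableDescriptor build() throws IOException {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like"))  // hypothetical table, not hbase:meta
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setInMemory(true)   // IN_MEMORY => 'true' in the logged descriptor
                .setBlocksize(8192)  // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
      }
    }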
2024-12-09T03:24:53,830 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:24:53,830 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:53,830 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:24:53,830 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:24:53,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:24:53,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:24:53,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:24:53,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:24:53,837 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:24:53,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:24:53,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:24:53,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:24:53,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:24:53,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:53,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
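The CompactionConfiguration lines repeated above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000) correspond to standard configuration keys. A hedged sketch of setting them explicitly; the values shown are the same ones the log reports, so the snippet only illustrates which knobs they are.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
      }
    }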
2024-12-09T03:24:53,840 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:24:53,841 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740 2024-12-09T03:24:53,842 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740 2024-12-09T03:24:53,844 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:24:53,844 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:24:53,844 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:24:53,845 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:24:53,846 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753731, jitterRate=-0.04158191382884979}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:24:53,846 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:24:53,847 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714693831Writing region info on filesystem at 1733714693831Initializing all the Stores at 1733714693832 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693832Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693832Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714693832Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714693832Cleaning up temporary data from old regions at 1733714693844 (+12 ms)Running coprocessor post-open hooks at 1733714693846 (+2 ms)Region opened successfully at 1733714693847 (+1 ms) 2024-12-09T03:24:53,848 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714693804 2024-12-09T03:24:53,851 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:24:53,851 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:24:53,852 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,852 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,36295,1733714692747, state=OPEN 2024-12-09T03:24:53,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:24:53,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:24:53,893 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:53,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:53,893 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:24:53,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:24:53,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,36295,1733714692747 in 243 msec 2024-12-09T03:24:53,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:24:53,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 748 msec 2024-12-09T03:24:53,900 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:24:53,900 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:24:53,902 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:24:53,902 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,36295,1733714692747, seqNum=-1] 2024-12-09T03:24:53,902 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:24:53,903 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49309, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:24:53,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 807 msec 2024-12-09T03:24:53,910 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714693910, completionTime=-1 2024-12-09T03:24:53,910 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:24:53,910 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:24:53,912 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:24:53,912 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714753912 2024-12-09T03:24:53,912 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714813912 2024-12-09T03:24:53,912 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:34385, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,913 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:53,915 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.087sec 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:24:53,917 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:24:53,918 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:24:53,920 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:24:53,920 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:24:53,921 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,34385,1733714692577-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
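With the master initialized and hbase:meta assigned, the entries that follow show a test client fetching the cluster id and the meta region location through the connection registry and ClientMetaService. A minimal, hypothetical sketch of the same lookups through the public client API; the class name MetaLocationCheck is invented here, and the ZooKeeper ensemble and port are simply the values visible in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // ensemble from the log
        conf.set("hbase.zookeeper.property.clientPort", "57669"); // port from the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Where hbase:meta is deployed; the log shows 1617b0b1421f,36295,...
          HRegionLocation loc = meta.getRegionLocation(new byte[0]);
          System.out.println("hbase:meta served by " + loc.getServerName());
          // The namespaces InitMetaProcedure just created: 'default' and 'hbase'.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }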
2024-12-09T03:24:53,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3883f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:53,979 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,34385,-1 for getting cluster id 2024-12-09T03:24:53,979 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:24:53,982 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd8f3ed79-0ba9-4ed9-9ffd-c73df333dd79' 2024-12-09T03:24:53,982 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:24:53,983 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d8f3ed79-0ba9-4ed9-9ffd-c73df333dd79" 2024-12-09T03:24:53,983 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce378b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:53,983 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,34385,-1] 2024-12-09T03:24:53,984 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:24:53,984 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:24:53,986 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32786, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:24:53,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7663cfa6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:24:53,988 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:24:53,989 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,36295,1733714692747, seqNum=-1] 2024-12-09T03:24:53,990 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:24:53,992 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:24:53,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,34385,1733714692577 2024-12-09T03:24:53,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:53,999 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:24:54,014 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:24:54,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:24:54,015 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33301 2024-12-09T03:24:54,016 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33301 connecting to ZooKeeper ensemble=127.0.0.1:57669 2024-12-09T03:24:54,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:54,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:24:54,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:333010x0, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:24:54,041 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33301-0x100089ad0930002 connected 2024-12-09T03:24:54,041 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T03:24:54,041 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T03:24:54,043 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:24:54,043 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-12-09T03:24:54,044 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:24:54,047 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:24:54,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33301 2024-12-09T03:24:54,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33301 2024-12-09T03:24:54,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33301 2024-12-09T03:24:54,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33301 2024-12-09T03:24:54,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33301 2024-12-09T03:24:54,052 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(746): ClusterId : d8f3ed79-0ba9-4ed9-9ffd-c73df333dd79 2024-12-09T03:24:54,052 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:24:54,065 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:24:54,065 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:24:54,076 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:24:54,076 DEBUG [RS:1;1617b0b1421f:33301 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9307741, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:24:54,088 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;1617b0b1421f:33301 2024-12-09T03:24:54,089 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:24:54,089 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:24:54,089 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:24:54,090 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,34385,1733714692577 with port=33301, startcode=1733714694014 2024-12-09T03:24:54,090 DEBUG [RS:1;1617b0b1421f:33301 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:24:54,092 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57381, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:24:54,092 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34385 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,092 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34385 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,094 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce 2024-12-09T03:24:54,094 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33407 2024-12-09T03:24:54,094 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:24:54,106 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:24:54,107 DEBUG [RS:1;1617b0b1421f:33301 {}] zookeeper.ZKUtil(111): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,107 WARN [RS:1;1617b0b1421f:33301 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:24:54,107 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,33301,1733714694014] 2024-12-09T03:24:54,107 INFO [RS:1;1617b0b1421f:33301 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:24:54,107 DEBUG [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,112 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:24:54,113 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:24:54,113 INFO [RS:1;1617b0b1421f:33301 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:24:54,114 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T03:24:54,114 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:24:54,115 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:24:54,115 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,115 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,115 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,115 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,115 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,115 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:54,116 DEBUG [RS:1;1617b0b1421f:33301 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
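The RS:1;1617b0b1421f:33301 entries in this stretch are a second RegionServer joining the already-running minicluster (HBaseTestingUtil reports 'Started new server=Thread[RS:1...]' a few entries later). A rough sketch of how such a server is typically added with the testing utility; this is not the test's actual code, and the method names follow the long-standing HBaseTestingUtility/MiniHBaseCluster API, so confirm them against the 3.0 HBaseTestingUtil seen in this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class ExtraRegionServer {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(1);                        // master plus RS:0
        // Adds the equivalent of RS:1 on a fresh ephemeral port.
        util.getMiniHBaseCluster().startRegionServer();
        // ... exercise the cluster, then tear it down.
        util.shutdownMiniCluster();
      }
    }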
2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,122 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,33301,1733714694014-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:24:54,135 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:24:54,135 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,33301,1733714694014-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,135 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,135 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.Replication(171): 1617b0b1421f,33301,1733714694014 started 2024-12-09T03:24:54,147 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:24:54,147 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,33301,1733714694014, RpcServer on 1617b0b1421f/172.17.0.3:33301, sessionid=0x100089ad0930002 2024-12-09T03:24:54,147 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:24:54,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;1617b0b1421f:33301,5,FailOnTimeoutGroup] 2024-12-09T03:24:54,147 DEBUG [RS:1;1617b0b1421f:33301 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,147 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,33301,1733714694014' 2024-12-09T03:24:54,147 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:24:54,148 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T03:24:54,148 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:24:54,148 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
1617b0b1421f,33301,1733714694014 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,33301,1733714694014' 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:24:54,149 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 1617b0b1421f,34385,1733714692577 2024-12-09T03:24:54,149 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:24:54,149 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f6920e4 2024-12-09T03:24:54,149 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:24:54,150 DEBUG [RS:1;1617b0b1421f:33301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:24:54,150 INFO [RS:1;1617b0b1421f:33301 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:24:54,150 INFO [RS:1;1617b0b1421f:33301 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:24:54,151 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:32790, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:24:54,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T03:24:54,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
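The two TableDescriptorChecker warnings directly above fire because the table is created with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly during the test. The warning text notes the values may come either from the table descriptor or from hbase.hregion.max.filesize / hbase.hregion.memstore.flush.size; a hedged sketch of a descriptor-based way to produce the same warnings and the create request logged next (the connection setup is assumed, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateSmallTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setMaxFileSize(786432L)      // trips the MAX_FILESIZE warning
              .setMemStoreFlushSize(8192L)  // trips the MEMSTORE_FLUSHSIZE warning
              .build();
          admin.createTable(td);            // appears below as HMaster$4 ... create, pid=4
        }
      }
    }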
2024-12-09T03:24:54,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:24:54,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T03:24:54,154 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:24:54,154 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:54,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T03:24:54,156 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:24:54,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:24:54,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741835_1011 (size=393) 2024-12-09T03:24:54,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741835_1011 (size=393) 2024-12-09T03:24:54,165 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0d8984204cdec39babdcd759e451afa1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce 2024-12-09T03:24:54,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44895 is added to blk_1073741836_1012 (size=76) 2024-12-09T03:24:54,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44369 is added to blk_1073741836_1012 (size=76) 2024-12-09T03:24:54,173 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:54,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 0d8984204cdec39babdcd759e451afa1, disabling compactions & flushes 2024-12-09T03:24:54,173 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,173 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,174 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. after waiting 0 ms 2024-12-09T03:24:54,174 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,174 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,174 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0d8984204cdec39babdcd759e451afa1: Waiting for close lock at 1733714694173Disabling compacts and flushes for region at 1733714694173Disabling writes for close at 1733714694174 (+1 ms)Writing region close event to WAL at 1733714694174Closed at 1733714694174 2024-12-09T03:24:54,175 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:24:54,176 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:24:54,176 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733714694176"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714694176"}]},"ts":"1733714694176"} 2024-12-09T03:24:54,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:54,179 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T03:24:54,180 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:24:54,180 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714694180"}]},"ts":"1733714694180"} 2024-12-09T03:24:54,183 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T03:24:54,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0d8984204cdec39babdcd759e451afa1, ASSIGN}] 2024-12-09T03:24:54,186 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0d8984204cdec39babdcd759e451afa1, ASSIGN 2024-12-09T03:24:54,188 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0d8984204cdec39babdcd759e451afa1, ASSIGN; state=OFFLINE, location=1617b0b1421f,36295,1733714692747; forceNewPlan=false, retain=false 2024-12-09T03:24:54,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:54,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:54,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:54,252 INFO [RS:1;1617b0b1421f:33301 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C33301%2C1733714694014, suffix=, logDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014, archiveDir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs, maxLogs=32 2024-12-09T03:24:54,252 INFO [RS:1;1617b0b1421f:33301 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33301%2C1733714694014.1733714694252 2024-12-09T03:24:54,258 INFO [RS:1;1617b0b1421f:33301 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 2024-12-09T03:24:54,259 DEBUG [RS:1;1617b0b1421f:33301 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34551:34551),(127.0.0.1/127.0.0.1:45555:45555)] 2024-12-09T03:24:54,340 INFO [1617b0b1421f:34385 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
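The wal.AbstractFSWAL(613) entry above gives the WAL sizing in effect for the new server: blocksize=256 MB, rollsize=128 MB, maxLogs=32, where a roll is requested once a WAL grows past rollsize (block size times the roll multiplier). As a hedged illustration, the configuration keys that normally govern those figures; the key names are the standard ones, not values read from this test, so verify them for your version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizing {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L << 20); // WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = blocksize * multiplier
        conf.setInt("hbase.regionserver.maxlogs", 32);                 // WALs retained before forced flushes
        long rollAt = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("roll requested at ~" + rollAt + " bytes");
      }
    }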
2024-12-09T03:24:54,340 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0d8984204cdec39babdcd759e451afa1, regionState=OPENING, regionLocation=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:54,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0d8984204cdec39babdcd759e451afa1, ASSIGN because future has completed 2024-12-09T03:24:54,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d8984204cdec39babdcd759e451afa1, server=1617b0b1421f,36295,1733714692747}] 2024-12-09T03:24:54,502 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,502 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d8984204cdec39babdcd759e451afa1, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:24:54,503 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,503 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:24:54,503 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,503 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,505 INFO [StoreOpener-0d8984204cdec39babdcd759e451afa1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,507 INFO [StoreOpener-0d8984204cdec39babdcd759e451afa1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d8984204cdec39babdcd759e451afa1 columnFamilyName info 2024-12-09T03:24:54,507 DEBUG [StoreOpener-0d8984204cdec39babdcd759e451afa1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:24:54,508 INFO [StoreOpener-0d8984204cdec39babdcd759e451afa1-1 {}] regionserver.HStore(327): Store=0d8984204cdec39babdcd759e451afa1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:24:54,508 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,510 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,511 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,514 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:24:54,515 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0d8984204cdec39babdcd759e451afa1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737250, jitterRate=-0.06253930926322937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:24:54,515 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:24:54,516 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0d8984204cdec39babdcd759e451afa1: Running coprocessor pre-open hook at 1733714694503Writing region info on filesystem at 1733714694503Initializing all the Stores at 1733714694504 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714694504Cleaning up temporary data from old regions at 1733714694510 (+6 ms)Running coprocessor post-open hooks at 1733714694515 (+5 ms)Region opened successfully at 1733714694516 (+1 ms) 2024-12-09T03:24:54,517 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., pid=6, masterSystemTime=1733714694497 2024-12-09T03:24:54,520 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,520 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:24:54,521 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0d8984204cdec39babdcd759e451afa1, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,36295,1733714692747 2024-12-09T03:24:54,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d8984204cdec39babdcd759e451afa1, server=1617b0b1421f,36295,1733714692747 because future has completed 2024-12-09T03:24:54,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:24:54,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d8984204cdec39babdcd759e451afa1, server=1617b0b1421f,36295,1733714692747 in 182 msec 2024-12-09T03:24:54,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:24:54,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=0d8984204cdec39babdcd759e451afa1, ASSIGN in 344 msec 2024-12-09T03:24:54,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:24:54,531 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714694531"}]},"ts":"1733714694531"} 2024-12-09T03:24:54,533 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T03:24:54,534 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:24:54,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 382 msec 2024-12-09T03:24:56,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:24:56,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T03:24:56,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T03:24:56,746 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T03:24:56,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:24:56,747 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T03:24:56,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T03:24:56,747 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T03:24:59,334 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:24:59,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:59,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:59,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:59,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:24:59,367 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T03:25:04,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34385 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:25:04,244 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-09T03:25:04,244 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-09T03:25:04,251 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T03:25:04,251 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:04,264 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:04,268 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:04,268 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:04,268 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:04,268 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:04,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36543cc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:04,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e931df5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:04,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@550b8f7f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-45309-hadoop-hdfs-3_4_1-tests_jar-_-any-788006203679703208/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:04,366 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@177dd5a1{HTTP/1.1, (http/1.1)}{localhost:45309} 2024-12-09T03:25:04,366 INFO [Time-limited test {}] server.Server(415): Started @124616ms 2024-12-09T03:25:04,367 WARN [Time-limited test {}] 
web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:04,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:04,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:04,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:04,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:04,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:04,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e2e93ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:04,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67588a04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:04,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40718784{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-38327-hadoop-hdfs-3_4_1-tests_jar-_-any-1565703953559081395/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:04,483 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@197e13e7{HTTP/1.1, (http/1.1)}{localhost:38327} 2024-12-09T03:25:04,483 INFO [Time-limited test {}] server.Server(415): Started @124733ms 2024-12-09T03:25:04,484 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:04,513 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:04,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:04,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:04,517 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:04,517 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:25:04,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b46502f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:04,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@291a3b2c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:04,609 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@304b13f9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-34421-hadoop-hdfs-3_4_1-tests_jar-_-any-8317430525544974496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:04,609 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@291a31b8{HTTP/1.1, (http/1.1)}{localhost:34421} 2024-12-09T03:25:04,609 INFO [Time-limited test {}] server.Server(415): Started @124860ms 2024-12-09T03:25:04,611 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:05,792 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:05,792 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:05,808 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:25:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8791b0a80a4bc53d with lease ID 0xfb01fff9985019c9: Processing first storage report for DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac from datanode DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8791b0a80a4bc53d with lease ID 0xfb01fff9985019c9: from storage DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac node DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8791b0a80a4bc53d with lease ID 0xfb01fff9985019c9: Processing first storage report for DS-b24d0d7f-8b56-4584-bf0f-ea058155c89c from datanode DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:05,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8791b0a80a4bc53d with lease ID 0xfb01fff9985019c9: from storage DS-b24d0d7f-8b56-4584-bf0f-ea058155c89c node DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:06,117 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data7/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:06,117 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data8/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:06,135 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:25:06,137 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc70f111bf68d6a1a with lease ID 0xfb01fff9985019ca: Processing first storage report for DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a from datanode DatanodeRegistration(127.0.0.1:45163, datanodeUuid=a451a229-9b8a-4235-b3c6-c67b628af0ee, infoPort=41343, infoSecurePort=0, ipcPort=45227, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:06,137 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc70f111bf68d6a1a with lease ID 0xfb01fff9985019ca: from storage DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a node DatanodeRegistration(127.0.0.1:45163, datanodeUuid=a451a229-9b8a-4235-b3c6-c67b628af0ee, infoPort=41343, infoSecurePort=0, ipcPort=45227, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:06,137 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc70f111bf68d6a1a with lease ID 0xfb01fff9985019ca: Processing first storage report for DS-abe7d243-9aa7-47b4-9a6a-c3331daef1b9 from datanode DatanodeRegistration(127.0.0.1:45163, datanodeUuid=a451a229-9b8a-4235-b3c6-c67b628af0ee, infoPort=41343, infoSecurePort=0, ipcPort=45227, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:06,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc70f111bf68d6a1a with lease ID 0xfb01fff9985019ca: from storage DS-abe7d243-9aa7-47b4-9a6a-c3331daef1b9 node DatanodeRegistration(127.0.0.1:45163, datanodeUuid=a451a229-9b8a-4235-b3c6-c67b628af0ee, infoPort=41343, infoSecurePort=0, ipcPort=45227, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:06,216 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data9/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:06,216 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data10/current/BP-1669423292-172.17.0.3-1733714689922/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:06,231 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:25:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe00fe6eb784f9a59 with lease ID 0xfb01fff9985019cb: Processing first storage report for DS-61694d08-21d8-4cb0-b73e-b8eb04c48067 from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=212b4932-7976-4498-81dd-2eff2d42f411, infoPort=35479, infoSecurePort=0, ipcPort=40291, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe00fe6eb784f9a59 with lease ID 0xfb01fff9985019cb: from storage DS-61694d08-21d8-4cb0-b73e-b8eb04c48067 node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=212b4932-7976-4498-81dd-2eff2d42f411, infoPort=35479, infoSecurePort=0, ipcPort=40291, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:25:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe00fe6eb784f9a59 with lease ID 0xfb01fff9985019cb: Processing first storage report for DS-74f25645-f1a8-43fa-8995-3109145bfaf3 from datanode DatanodeRegistration(127.0.0.1:39961, datanodeUuid=212b4932-7976-4498-81dd-2eff2d42f411, infoPort=35479, infoSecurePort=0, ipcPort=40291, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922) 2024-12-09T03:25:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe00fe6eb784f9a59 with lease ID 0xfb01fff9985019cb: from storage DS-74f25645-f1a8-43fa-8995-3109145bfaf3 node DatanodeRegistration(127.0.0.1:39961, datanodeUuid=212b4932-7976-4498-81dd-2eff2d42f411, infoPort=35479, infoSecurePort=0, ipcPort=40291, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:06,244 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,244 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:06,244 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,244 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,245 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:06,245 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:06,245 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 
2024-12-09T03:25:06,245 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta block BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:06,245 WARN [PacketResponder: BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44895] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,245 WARN [PacketResponder: BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44895] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58848 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58848 dst: /127.0.0.1:44369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1483219341_22 at /127.0.0.1:47882 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47882 dst: /127.0.0.1:44895 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:47816 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47816 dst: /127.0.0.1:44895 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,246 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58832 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58832 dst: /127.0.0.1:44369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:58796 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58796 dst: /127.0.0.1:44369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1483219341_22 at /127.0.0.1:58876 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58876 dst: /127.0.0.1:44369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,248 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:47856 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47856 dst: /127.0.0.1:44895 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,247 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:47842 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44895:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47842 dst: /127.0.0.1:44895 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:06,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7856d523{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:06,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c24bf58{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:06,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:06,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3372b5da{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:06,250 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62c98b43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:06,251 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:06,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:25:06,251 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid 83b865ab-3ea8-45a7-ae7b-86b463852204) service to localhost/127.0.0.1:33407 2024-12-09T03:25:06,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:06,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:06,252 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:06,252 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:06,253 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta block BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,253 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,253 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@75dcb199 {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing unknown operation src: /127.0.0.1:58736 dst: /127.0.0.1:44369 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:06,254 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1483219341_22 at /127.0.0.1:58738 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44369:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58738 dst: /127.0.0.1:44369 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:06,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32694da{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:06,256 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@150beccb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:06,256 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:06,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@172c9107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:06,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198a2712{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:06,257 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:06,257 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:25:06,257 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:06,257 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid 47afa076-6b0a-4d9f-bd88-112849ba6ab9) service to localhost/127.0.0.1:33407 2024-12-09T03:25:06,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data1/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:06,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data2/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:06,258 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:06,261 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,261 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., hostname=1617b0b1421f,36295,1733714692747, seqNum=2] 2024-12-09T03:25:06,263 ERROR [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce-prefix:1617b0b1421f,36295,1733714692747 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,263 WARN [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce-prefix:1617b0b1421f,36295,1733714692747 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,263 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:06,263 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C36295%2C1733714692747:(num 1733714693380) roll requested 2024-12-09T03:25:06,263 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714706263 2024-12-09T03:25:06,266 WARN [Thread-905 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,266 WARN [Thread-905 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:06,267 WARN [Thread-905 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741838_1018 2024-12-09T03:25:06,269 WARN [Thread-905 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:06,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:06,276 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:06,276 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:06,276 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:06,276 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:06,276 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 2024-12-09T03:25:06,277 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,277 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:06,278 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T03:25:06,278 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T03:25:06,278 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 2024-12-09T03:25:06,280 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35479:35479),(127.0.0.1/127.0.0.1:41343:41343)] 2024-12-09T03:25:06,280 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:06,282 WARN [IPC Server handler 1 on default port 33407 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-12-09T03:25:06,285 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 after 5ms 2024-12-09T03:25:06,874 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:08,118 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:08,281 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:08,282 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 2024-12-09T03:25:08,284 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:08,285 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:08,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:40548 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:39961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40548 dst: /127.0.0.1:39961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:08,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:38856 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:45163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38856 dst: /127.0.0.1:45163 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:08,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@304b13f9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:08,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@291a31b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:08,290 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:08,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@291a3b2c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:08,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b46502f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:08,292 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:08,292 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid 212b4932-7976-4498-81dd-2eff2d42f411) service to localhost/127.0.0.1:33407 2024-12-09T03:25:08,292 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:25:08,292 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:08,293 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data9/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:08,293 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data10/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:08,293 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:08,875 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,118 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,282 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]] 2024-12-09T03:25:10,283 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,283 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C36295%2C1733714692747:(num 1733714706263) roll requested 2024-12-09T03:25:10,284 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714710283 2024-12-09T03:25:10,287 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 after 4009ms 2024-12-09T03:25:10,292 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,292 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:10,292 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741840_1022 2024-12-09T03:25:10,293 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:10,294 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,294 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:10,294 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741841_1023 2024-12-09T03:25:10,294 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:10,295 WARN [Thread-914 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:10,296 WARN [Thread-914 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 
2024-12-09T03:25:10,296 WARN [Thread-914 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741842_1024 2024-12-09T03:25:10,296 WARN [Thread-914 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:10,298 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T03:25:10,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:10,301 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:10,301 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:10,301 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:10,301 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:10,301 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714710283 2024-12-09T03:25:10,303 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37983:37983),(127.0.0.1/127.0.0.1:41343:41343)] 2024-12-09T03:25:10,303 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:10,303 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 is not closed yet, will try archiving it next time 2024-12-09T03:25:10,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45163 is added to blk_1073741839_1021 (size=3600) 2024-12-09T03:25:10,705 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:10,876 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,119 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,159 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4a0135c1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45163, datanodeUuid=a451a229-9b8a-4235-b3c6-c67b628af0ee, infoPort=41343, infoSecurePort=0, ipcPort=45227, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741839_1021 to 127.0.0.1:44369 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,301 WARN [ResponseProcessor for block BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,302 WARN [DataStreamer for file /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714710283 block BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 
2024-12-09T03:25:12,302 WARN [PacketResponder: BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45163] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59532 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59532 dst: /127.0.0.1:34579 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:12,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:49556 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:45163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49556 dst: /127.0.0.1:45163 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,303 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]] 2024-12-09T03:25:12,303 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,303 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C36295%2C1733714692747:(num 1733714710283) roll requested 2024-12-09T03:25:12,304 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714712303 2024-12-09T03:25:12,306 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,306 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:12,306 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741844_1027 2024-12-09T03:25:12,307 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:12,308 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741845_1028 2024-12-09T03:25:12,309 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741846_1029 2024-12-09T03:25:12,311 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:12,314 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59554 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,314 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:12,314 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030 2024-12-09T03:25:12,314 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59554 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T03:25:12,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59554 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59554 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,315 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:12,316 WARN [IPC Server handler 4 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:12,316 WARN [IPC Server handler 4 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:12,316 WARN [IPC Server handler 4 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:12,319 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:12,319 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:12,319 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:12,319 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:12,319 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:12,319 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714710283 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714712303 2024-12-09T03:25:12,320 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37983:37983)] 2024-12-09T03:25:12,321 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 
2024-12-09T03:25:12,321 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714710283 is not closed yet, will try archiving it next time 2024-12-09T03:25:12,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741843_1026 (size=93) 2024-12-09T03:25:12,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40718784{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:12,327 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@197e13e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:12,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:12,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67588a04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:12,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e2e93ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:12,328 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:12,328 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:25:12,328 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:12,328 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid a451a229-9b8a-4235-b3c6-c67b628af0ee) service to localhost/127.0.0.1:33407 2024-12-09T03:25:12,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data7/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:12,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data8/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:12,329 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:12,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] regionserver.HRegion(8855): Flush requested on 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:12,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:25:12,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/97525798a1684020854fbd472ac31395 is 1080, key is row0002/info:/1733714708294/Put/seqid=0 2024-12-09T03:25:12,358 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,358 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:12,358 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741849_1032 2024-12-09T03:25:12,359 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:12,360 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,360 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:12,360 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741850_1033 2024-12-09T03:25:12,361 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:12,362 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,362 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:12,362 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741851_1034 2024-12-09T03:25:12,363 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:12,364 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,364 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 
2024-12-09T03:25:12,364 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741852_1035 2024-12-09T03:25:12,364 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:12,365 WARN [IPC Server handler 1 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:12,365 WARN [IPC Server handler 1 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:12,365 WARN [IPC Server handler 1 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:12,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741853_1036 (size=10347) 2024-12-09T03:25:12,723 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:12,725 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714710283 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C36295%2C1733714692747.1733714710283 2024-12-09T03:25:12,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/97525798a1684020854fbd472ac31395 2024-12-09T03:25:12,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/97525798a1684020854fbd472ac31395 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395 2024-12-09T03:25:12,790 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395, entries=5, sequenceid=11, filesize=10.1 K 2024-12-09T03:25:12,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 0d8984204cdec39babdcd759e451afa1 in 453ms, sequenceid=11, compaction requested=false 2024-12-09T03:25:12,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:12,876 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] regionserver.HRegion(8855): Flush requested on 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:12,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T03:25:12,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/f5af270c951c4c6dad0dadfad1daf5ae is 1080, key is row0007/info:/1733714712339/Put/seqid=0 2024-12-09T03:25:12,980 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39961 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59584 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037 to mirror 127.0.0.1:39961 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,980 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:12,980 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037 2024-12-09T03:25:12,980 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59584 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:12,980 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59584 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59584 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,981 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:12,982 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,982 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:12,982 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741855_1038 2024-12-09T03:25:12,982 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:12,984 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44369 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:12,984 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59598 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039 to mirror 127.0.0.1:44369 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,984 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:12,984 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039 2024-12-09T03:25:12,984 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59598 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:12,985 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59598 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59598 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:12,985 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:12,987 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:12,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59614 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:12,987 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:12,987 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040 2024-12-09T03:25:12,987 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59614 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:12,987 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59614 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59614 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:12,988 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:12,988 WARN [IPC Server handler 4 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:12,988 WARN [IPC Server handler 4 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:12,988 WARN [IPC Server handler 4 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:12,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741858_1041 (size=12506) 2024-12-09T03:25:13,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/f5af270c951c4c6dad0dadfad1daf5ae 2024-12-09T03:25:13,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/f5af270c951c4c6dad0dadfad1daf5ae as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae 2024-12-09T03:25:13,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae, entries=7, sequenceid=24, filesize=12.2 K 2024-12-09T03:25:13,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 0d8984204cdec39babdcd759e451afa1 in 440ms, sequenceid=24, compaction requested=false 2024-12-09T03:25:13,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:13,409 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-09T03:25:13,409 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:13,409 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae because midkey is the same as first or last row 2024-12-09T03:25:14,119 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,321 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]] 2024-12-09T03:25:14,321 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,322 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C36295%2C1733714692747:(num 1733714712303) roll requested 2024-12-09T03:25:14,322 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714714322 2024-12-09T03:25:14,331 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44369 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59636 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042 to mirror 127.0.0.1:44369 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,332 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:14,332 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042 2024-12-09T03:25:14,332 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59636 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T03:25:14,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59636 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59636 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,333 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:14,336 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,336 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:14,336 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741860_1043 2024-12-09T03:25:14,337 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:14,338 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,338 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:14,338 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741861_1044 2024-12-09T03:25:14,339 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:14,340 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,340 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 
2024-12-09T03:25:14,340 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741862_1045 2024-12-09T03:25:14,340 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:14,341 WARN [IPC Server handler 2 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:14,341 WARN [IPC Server handler 2 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:14,341 WARN [IPC Server handler 2 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:14,344 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:14,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:14,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:14,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:14,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:14,345 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714712303 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714714322 2024-12-09T03:25:14,345 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37983:37983)] 2024-12-09T03:25:14,345 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:14,345 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714712303 is not closed yet, will try archiving it next time 2024-12-09T03:25:14,346 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714706263 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C36295%2C1733714692747.1733714706263 2024-12-09T03:25:14,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741848_1031 (size=24823) 2024-12-09T03:25:14,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] regionserver.HRegion(8855): Flush requested on 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:14,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T03:25:14,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/7021ff5c411d446faa58744b04627e26 is 1079, key is tmprow/info:/1733714714402/Put/seqid=0 2024-12-09T03:25:14,414 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,414 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:14,414 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741864_1047 2024-12-09T03:25:14,415 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:14,417 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,417 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:14,417 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741865_1048 2024-12-09T03:25:14,418 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:14,419 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,420 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:14,420 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741866_1049 2024-12-09T03:25:14,420 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:14,423 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39961 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59650 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050 to mirror 127.0.0.1:39961 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,423 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:14,423 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050 2024-12-09T03:25:14,423 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59650 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-09T03:25:14,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59650 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59650 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,424 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:14,425 WARN [IPC Server handler 3 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:14,425 WARN [IPC Server handler 3 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:14,425 WARN [IPC Server handler 3 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:14,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741868_1051 (size=6027) 2024-12-09T03:25:14,747 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 is not closed yet, will try archiving it next time 2024-12-09T03:25:14,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB 
at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/7021ff5c411d446faa58744b04627e26 2024-12-09T03:25:14,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/7021ff5c411d446faa58744b04627e26 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26 2024-12-09T03:25:14,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26, entries=1, sequenceid=34, filesize=5.9 K 2024-12-09T03:25:14,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 0d8984204cdec39babdcd759e451afa1 in 441ms, sequenceid=34, compaction requested=true 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae because midkey is the same as first or last row 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d8984204cdec39babdcd759e451afa1:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:25:14,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:25:14,845 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:25:14,846 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:25:14,846 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1541): 0d8984204cdec39babdcd759e451afa1/info is initiating minor compaction (all files) 2024-12-09T03:25:14,847 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d8984204cdec39babdcd759e451afa1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 
2024-12-09T03:25:14,847 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26] into tmpdir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp, totalSize=28.2 K 2024-12-09T03:25:14,848 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97525798a1684020854fbd472ac31395, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733714708294 2024-12-09T03:25:14,848 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5af270c951c4c6dad0dadfad1daf5ae, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733714712339 2024-12-09T03:25:14,849 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7021ff5c411d446faa58744b04627e26, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733714714402 2024-12-09T03:25:14,861 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d8984204cdec39babdcd759e451afa1#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:25:14,861 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/bb4d86c5d1fd4be5b01e46c7549a7c69 is 1080, key is row0002/info:/1733714708294/Put/seqid=0 2024-12-09T03:25:14,863 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:14,863 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:14,863 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741869_1052 2024-12-09T03:25:14,864 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:14,865 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,865 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:14,865 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741870_1053 2024-12-09T03:25:14,866 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:14,868 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:14,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59688 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,868 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:14,868 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054 2024-12-09T03:25:14,868 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59688 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:14,868 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59688 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59688 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:14,869 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:14,870 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:14,870 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 
2024-12-09T03:25:14,870 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741872_1055 2024-12-09T03:25:14,870 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:14,871 WARN [IPC Server handler 2 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:14,871 WARN [IPC Server handler 2 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:14,871 WARN [IPC Server handler 2 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:14,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741873_1056 (size=17994) 2024-12-09T03:25:14,877 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:15,286 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/bb4d86c5d1fd4be5b01e46c7549a7c69 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 2024-12-09T03:25:15,294 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d8984204cdec39babdcd759e451afa1/info of 0d8984204cdec39babdcd759e451afa1 into bb4d86c5d1fd4be5b01e46c7549a7c69(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:25:15,294 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:15,294 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., storeName=0d8984204cdec39babdcd759e451afa1/info, priority=13, startTime=1733714714844; duration=0sec 2024-12-09T03:25:15,294 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T03:25:15,294 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:15,294 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 because midkey is the same as first or last row 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 because midkey is the same as first or last row 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 because midkey is the same as first or last row 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:25:15,295 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d8984204cdec39babdcd759e451afa1:info 2024-12-09T03:25:15,813 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@702d8f7e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741843_1026 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:15,813 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eea6f0b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741853_1036 to 127.0.0.1:44369 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:15,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] regionserver.HRegion(8855): Flush requested on 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:15,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T03:25:15,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/e4c77cc34b434b568a2be5e4563f9d24 is 1079, key is tmprow/info:/1733714715831/Put/seqid=0 2024-12-09T03:25:15,842 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59696 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057 to mirror 127.0.0.1:39961 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:15,842 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39961 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:15,842 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59696 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:15,843 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK]) is bad. 2024-12-09T03:25:15,843 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057 2024-12-09T03:25:15,843 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59696 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59696 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:15,844 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39961,DS-61694d08-21d8-4cb0-b73e-b8eb04c48067,DISK] 2024-12-09T03:25:15,847 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44369 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:15,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59708 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058 to mirror 127.0.0.1:44369 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:15,847 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]) is bad. 2024-12-09T03:25:15,847 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058 2024-12-09T03:25:15,847 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59708 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:15,847 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:59708 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59708 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:15,847 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK] 2024-12-09T03:25:15,849 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:15,849 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]) is bad. 2024-12-09T03:25:15,849 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741876_1059 2024-12-09T03:25:15,850 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44895,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK] 2024-12-09T03:25:15,851 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:15,851 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:15,851 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741877_1060 2024-12-09T03:25:15,851 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:15,852 WARN [IPC Server handler 3 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T03:25:15,852 WARN [IPC Server handler 3 on default port 33407 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T03:25:15,852 WARN [IPC Server handler 3 on default port 33407 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T03:25:15,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741878_1061 (size=6027) 2024-12-09T03:25:16,120 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:16,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/e4c77cc34b434b568a2be5e4563f9d24 2024-12-09T03:25:16,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/e4c77cc34b434b568a2be5e4563f9d24 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24 2024-12-09T03:25:16,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24, entries=1, sequenceid=45, filesize=5.9 K 2024-12-09T03:25:16,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 0d8984204cdec39babdcd759e451afa1 in 444ms, sequenceid=45, compaction requested=false 2024-12-09T03:25:16,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:16,277 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-09T03:25:16,277 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:16,277 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 because midkey is the same as first or last row 2024-12-09T03:25:16,346 WARN [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-09T03:25:16,346 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:16,458 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:16,461 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:16,462 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:16,462 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:16,462 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:16,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f43bae2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:16,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f8625f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:16,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fc9ace2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/java.io.tmpdir/jetty-localhost-38843-hadoop-hdfs-3_4_1-tests_jar-_-any-3269090120080762303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:16,555 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c3131b{HTTP/1.1, (http/1.1)}{localhost:38843} 2024-12-09T03:25:16,555 INFO [Time-limited test {}] server.Server(415): Started @136805ms 2024-12-09T03:25:16,556 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:16,815 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@702d8f7e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741848_1031 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:16,815 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7eea6f0b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741858_1041 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:16,877 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:17,166 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:25:17,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf41cdfc9fc4b3f1e with lease ID 0xfb01fff9985019cc: from storage DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a node DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:25:17,174 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf41cdfc9fc4b3f1e with lease ID 0xfb01fff9985019cc: from storage DS-a96b63e4-0105-4d91-9f70-441e6d7d18e3 node DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:18,120 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:18,347 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741873_1056 (size=17994) 2024-12-09T03:25:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741868_1051 (size=6027) 2024-12-09T03:25:18,878 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:19,813 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@702d8f7e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741878_1061 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:20,121 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:20,347 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:20,878 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:22,122 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:22,347 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:22,553 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:25:22,879 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:23,108 ERROR [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData-prefix:1617b0b1421f,34385,1733714692577 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:23,108 WARN [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData-prefix:1617b0b1421f,34385,1733714692577 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:23,108 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C34385%2C1733714692577:(num 1733714692894) roll requested 2024-12-09T03:25:23,109 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C34385%2C1733714692577.1733714723109 2024-12-09T03:25:23,112 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:23,112 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 
2024-12-09T03:25:23,112 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741879_1062 2024-12-09T03:25:23,113 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:23,119 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:23,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:23,119 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:23,119 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:23,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:23,120 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714723109 2024-12-09T03:25:23,120 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:23,121 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:23,121 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 2024-12-09T03:25:23,121 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37983:37983),(127.0.0.1/127.0.0.1:34251:34251)] 2024-12-09T03:25:23,121 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 is not closed yet, will try archiving it next time 2024-12-09T03:25:23,121 WARN [IPC Server handler 1 on default port 33407 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-12-09T03:25:23,122 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 after 1ms 2024-12-09T03:25:24,122 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:24,348 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:26,123 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:26,348 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:27,123 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 after 4002ms 2024-12-09T03:25:27,171 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46ef10cc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741835_1011 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:27,172 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@765c29de[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741831_1007 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:27,185 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7752c253 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1669423292-172.17.0.3-1733714689922:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:44369,null,null]) java.net.ConnectException: Call From 1617b0b1421f/172.17.0.3 to localhost:36631 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T03:25:27,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741833_1020 (size=455) 2024-12-09T03:25:27,307 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714693380 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C36295%2C1733714692747.1733714693380 2024-12-09T03:25:27,308 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714712303 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C36295%2C1733714692747.1733714712303 2024-12-09T03:25:28,123 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:28,172 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@765c29de[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741829_1005 to 127.0.0.1:45163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:28,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:25:28,348 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:30,123 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:30,173 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46ef10cc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741826_1002 to 127.0.0.1:39961 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:30,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741833_1020 (size=455) 2024-12-09T03:25:30,349 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,124 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:32,227 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.1733714732227 2024-12-09T03:25:32,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,234 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,234 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714714322 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714732227 2024-12-09T03:25:32,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37983:37983),(127.0.0.1/127.0.0.1:34251:34251)] 2024-12-09T03:25:32,236 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714714322 is not closed yet, will try archiving it next time 2024-12-09T03:25:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741863_1046 (size=13591) 2024-12-09T03:25:32,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36295 {}] regionserver.HRegion(8855): Flush requested on 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:32,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T03:25:32,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/2f44063a8d584f32af5461efb8b68ce9 is 1080, key is row0013/info:/1733714732237/Put/seqid=0 2024-12-09T03:25:32,255 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:32,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41248 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,255 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,255 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066 2024-12-09T03:25:32,255 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41248 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:32,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41248 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741882_1066] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41248 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,256 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741883_1067 (size=11421) 2024-12-09T03:25:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741883_1067 (size=11421) 2024-12-09T03:25:32,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/2f44063a8d584f32af5461efb8b68ce9 2024-12-09T03:25:32,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/2f44063a8d584f32af5461efb8b68ce9 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9 2024-12-09T03:25:32,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9, entries=6, sequenceid=55, filesize=11.2 K 2024-12-09T03:25:32,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 0d8984204cdec39babdcd759e451afa1 in 32ms, sequenceid=55, compaction requested=true 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 because midkey is the same as first or last row 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store 0d8984204cdec39babdcd759e451afa1:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:25:32,279 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:25:32,279 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:25:32,280 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:25:32,280 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1541): 0d8984204cdec39babdcd759e451afa1/info is initiating minor compaction (all files) 2024-12-09T03:25:32,281 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d8984204cdec39babdcd759e451afa1/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,281 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9] into tmpdir=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp, totalSize=34.6 K 2024-12-09T03:25:32,281 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb4d86c5d1fd4be5b01e46c7549a7c69, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733714708294 2024-12-09T03:25:32,281 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting e4c77cc34b434b568a2be5e4563f9d24, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733714715831 2024-12-09T03:25:32,282 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f44063a8d584f32af5461efb8b68ce9, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733714716241 2024-12-09T03:25:32,298 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d8984204cdec39babdcd759e451afa1#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:25:32,299 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/6178110b2fc2425da2be182a1c0356d5 is 1080, key is row0002/info:/1733714708294/Put/seqid=0 2024-12-09T03:25:32,301 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58036 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:32,302 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,302 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068 2024-12-09T03:25:32,302 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58036 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:32,302 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58036 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58036 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:32,302 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741885_1069 (size=23502) 2024-12-09T03:25:32,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741885_1069 (size=23502) 2024-12-09T03:25:32,317 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/6178110b2fc2425da2be182a1c0356d5 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/6178110b2fc2425da2be182a1c0356d5 2024-12-09T03:25:32,326 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0d8984204cdec39babdcd759e451afa1/info of 0d8984204cdec39babdcd759e451afa1 into 6178110b2fc2425da2be182a1c0356d5(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:25:32,326 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d8984204cdec39babdcd759e451afa1: 2024-12-09T03:25:32,326 INFO [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., storeName=0d8984204cdec39babdcd759e451afa1/info, priority=13, startTime=1733714732279; duration=0sec 2024-12-09T03:25:32,326 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T03:25:32,326 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:32,326 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/6178110b2fc2425da2be182a1c0356d5 because midkey is the same as first or last row 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/6178110b2fc2425da2be182a1c0356d5 because midkey is the same as first or last row 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/6178110b2fc2425da2be182a1c0356d5 because midkey is the same as first or last row 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:25:32,327 DEBUG [RS:0;1617b0b1421f:36295-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d8984204cdec39babdcd759e451afa1:info 2024-12-09T03:25:32,349 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-09T03:25:32,349 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:25:32,463 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:25:32,463 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:25:32,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:32,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:32,463 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T03:25:32,463 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:25:32,464 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=818436833, stopped=false 2024-12-09T03:25:32,464 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,34385,1733714692577 2024-12-09T03:25:32,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:25:32,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:25:32,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:32,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:32,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:25:32,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:32,620 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:25:32,620 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:25:32,620 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:25:32,620 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:25:32,621 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:25:32,621 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:25:32,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:32,621 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,36295,1733714692747' ***** 2024-12-09T03:25:32,621 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:25:32,621 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,33301,1733714694014' ***** 2024-12-09T03:25:32,621 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:25:32,621 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:25:32,621 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:25:32,621 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:25:32,621 INFO [RS:0;1617b0b1421f:36295 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:25:32,621 INFO [RS:0;1617b0b1421f:36295 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:25:32,621 INFO [RS:1;1617b0b1421f:33301 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(3091): Received CLOSE for 0d8984204cdec39babdcd759e451afa1 2024-12-09T03:25:32,622 INFO [RS:1;1617b0b1421f:33301 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:25:32,622 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,33301,1733714694014 2024-12-09T03:25:32,622 INFO [RS:1;1617b0b1421f:33301 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:25:32,622 INFO [RS:1;1617b0b1421f:33301 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1617b0b1421f:33301. 
2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,36295,1733714692747 2024-12-09T03:25:32,622 DEBUG [RS:1;1617b0b1421f:33301 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:25:32,622 DEBUG [RS:1;1617b0b1421f:33301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:36295. 2024-12-09T03:25:32,622 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,33301,1733714694014; all regions closed. 
2024-12-09T03:25:32,622 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0d8984204cdec39babdcd759e451afa1, disabling compactions & flushes 2024-12-09T03:25:32,622 DEBUG [RS:0;1617b0b1421f:36295 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:25:32,622 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,622 DEBUG [RS:0;1617b0b1421f:36295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:32,622 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,622 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. after waiting 0 ms 2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:25:32,622 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,622 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T03:25:32,622 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0d8984204cdec39babdcd759e451afa1 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-09T03:25:32,624 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,624 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,624 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,625 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,625 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:25:32,625 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:25:32,625 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,625 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 2024-12-09T03:25:32,625 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:25:32,626 WARN [IPC Server handler 1 on default port 33407 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 has not been closed. Lease recovery is in progress. 
RecoveryId = 1070 for block blk_1073741837_1015 2024-12-09T03:25:32,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 after 1ms 2024-12-09T03:25:32,629 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T03:25:32,629 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1325): Online Regions={0d8984204cdec39babdcd759e451afa1=TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:25:32,629 DEBUG [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1351): Waiting on 0d8984204cdec39babdcd759e451afa1, 1588230740 2024-12-09T03:25:32,629 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:25:32,629 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:25:32,629 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:25:32,629 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:25:32,629 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:25:32,629 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-09T03:25:32,629 ERROR [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce-prefix:1617b0b1421f,36295,1733714692747.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,629 WARN [FSHLog-0-hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce-prefix:1617b0b1421f,36295,1733714692747.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,630 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C36295%2C1733714692747.meta:.meta(num 1733714693819) roll requested 2024-12-09T03:25:32,630 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C36295%2C1733714692747.meta.1733714732630.meta 2024-12-09T03:25:32,631 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/399f904e28c64953ba362bbbbb079b70 is 1080, key is row0018/info:/1733714732248/Put/seqid=0 2024-12-09T03:25:32,633 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,633 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:32,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41296 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58054 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,634 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,634 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,634 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072 2024-12-09T03:25:32,634 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58054 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:32,634 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071 2024-12-09T03:25:32,634 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41296 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T03:25:32,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:58054 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741887_1072] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58054 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:32,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41296 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741886_1071] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41296 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,634 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,634 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,637 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.1733714714322 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C36295%2C1733714692747.1733714714322 2024-12-09T03:25:32,639 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,639 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741888_1073 (size=11421) 2024-12-09T03:25:32,640 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741888_1073 (size=11421) 2024-12-09T03:25:32,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,640 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/399f904e28c64953ba362bbbbb079b70 2024-12-09T03:25:32,641 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,641 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL 
/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714732630.meta 2024-12-09T03:25:32,644 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,645 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44369,DS-7d5d72d1-44c8-4ed1-8c62-cd17bd9e0020,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,645 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta 2024-12-09T03:25:32,645 WARN [IPC Server handler 3 on default port 33407 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1075 for block blk_1073741834_1010 2024-12-09T03:25:32,646 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta after 0ms 2024-12-09T03:25:32,646 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34251:34251),(127.0.0.1/127.0.0.1:37983:37983)] 2024-12-09T03:25:32,646 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta is not closed yet, will try archiving it next time 2024-12-09T03:25:32,648 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/.tmp/info/399f904e28c64953ba362bbbbb079b70 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/399f904e28c64953ba362bbbbb079b70 2024-12-09T03:25:32,654 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/399f904e28c64953ba362bbbbb079b70, entries=6, sequenceid=65, filesize=11.2 K 2024-12-09T03:25:32,655 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 0d8984204cdec39babdcd759e451afa1 in 33ms, sequenceid=65, compaction requested=false 2024-12-09T03:25:32,656 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24, 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9] to archive 2024-12-09T03:25:32,657 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:25:32,659 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/97525798a1684020854fbd472ac31395 2024-12-09T03:25:32,662 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/f5af270c951c4c6dad0dadfad1daf5ae 2024-12-09T03:25:32,664 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/bb4d86c5d1fd4be5b01e46c7549a7c69 2024-12-09T03:25:32,665 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/info/dec1f5b7786e48bea348ef94c365213b is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1./info:regioninfo/1733714694521/Put/seqid=0 2024-12-09T03:25:32,666 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/7021ff5c411d446faa58744b04627e26 2024-12-09T03:25:32,667 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/e4c77cc34b434b568a2be5e4563f9d24 2024-12-09T03:25:32,667 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41318 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:32,668 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,668 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076 2024-12-09T03:25:32,668 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41318 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:32,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41318 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741890_1076] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41318 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,668 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,669 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/info/2f44063a8d584f32af5461efb8b68ce9 2024-12-09T03:25:32,670 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1617b0b1421f:34385 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T03:25:32,670 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [97525798a1684020854fbd472ac31395=10347, f5af270c951c4c6dad0dadfad1daf5ae=12506, bb4d86c5d1fd4be5b01e46c7549a7c69=17994, 7021ff5c411d446faa58744b04627e26=6027, e4c77cc34b434b568a2be5e4563f9d24=6027, 2f44063a8d584f32af5461efb8b68ce9=11421] 2024-12-09T03:25:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741891_1077 (size=7089) 2024-12-09T03:25:32,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741891_1077 (size=7089) 2024-12-09T03:25:32,674 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/info/dec1f5b7786e48bea348ef94c365213b 2024-12-09T03:25:32,674 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/default/TestLogRolling-testLogRollOnDatanodeDeath/0d8984204cdec39babdcd759e451afa1/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-09T03:25:32,675 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,675 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0d8984204cdec39babdcd759e451afa1: Waiting for close lock at 1733714732622Running coprocessor pre-close hooks at 1733714732622Disabling compacts and flushes for region at 1733714732622Disabling writes for close at 1733714732622Obtaining lock to block concurrent updates at 1733714732622Preparing flush snapshotting stores in 0d8984204cdec39babdcd759e451afa1 at 1733714732622Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733714732623 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 
at 1733714732623Flushing 0d8984204cdec39babdcd759e451afa1/info: creating writer at 1733714732623Flushing 0d8984204cdec39babdcd759e451afa1/info: appending metadata at 1733714732631 (+8 ms)Flushing 0d8984204cdec39babdcd759e451afa1/info: closing flushed file at 1733714732631Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5093b574: reopening flushed file at 1733714732647 (+16 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 0d8984204cdec39babdcd759e451afa1 in 33ms, sequenceid=65, compaction requested=false at 1733714732655 (+8 ms)Writing region close event to WAL at 1733714732670 (+15 ms)Running coprocessor post-close hooks at 1733714732674 (+4 ms)Closed at 1733714732675 (+1 ms) 2024-12-09T03:25:32,675 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733714694151.0d8984204cdec39babdcd759e451afa1. 2024-12-09T03:25:32,699 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/ns/16b2f82880144f109a4439fd948b1209 is 43, key is default/ns:d/1733714693904/Put/seqid=0 2024-12-09T03:25:32,702 WARN [Thread-1062 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:32,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41336 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:32,702 WARN [Thread-1062 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:32,702 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41336 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:32,702 WARN [Thread-1062 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078 2024-12-09T03:25:32,702 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_371533478_22 at /127.0.0.1:41336 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741892_1078] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41336 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:32,703 WARN [Thread-1062 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:32,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741893_1079 (size=5153) 2024-12-09T03:25:32,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741893_1079 (size=5153) 2024-12-09T03:25:32,709 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/ns/16b2f82880144f109a4439fd948b1209 2024-12-09T03:25:32,728 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/table/06a4a14a2e084a2a94216efc791559d7 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733714694531/Put/seqid=0 2024-12-09T03:25:32,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741894_1080 (size=5424) 2024-12-09T03:25:32,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741894_1080 (size=5424) 2024-12-09T03:25:32,739 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/table/06a4a14a2e084a2a94216efc791559d7 2024-12-09T03:25:32,757 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/info/dec1f5b7786e48bea348ef94c365213b as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/info/dec1f5b7786e48bea348ef94c365213b 2024-12-09T03:25:32,765 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/info/dec1f5b7786e48bea348ef94c365213b, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T03:25:32,766 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/ns/16b2f82880144f109a4439fd948b1209 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/ns/16b2f82880144f109a4439fd948b1209 2024-12-09T03:25:32,773 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/ns/16b2f82880144f109a4439fd948b1209, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T03:25:32,774 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/.tmp/table/06a4a14a2e084a2a94216efc791559d7 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/table/06a4a14a2e084a2a94216efc791559d7 2024-12-09T03:25:32,781 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/table/06a4a14a2e084a2a94216efc791559d7, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T03:25:32,783 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false 2024-12-09T03:25:32,800 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:25:32,801 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:25:32,801 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:25:32,801 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714732629Running coprocessor pre-close hooks at 1733714732629Disabling compacts and flushes for region at 1733714732629Disabling writes for close at 1733714732629Obtaining lock to block concurrent updates at 1733714732629Preparing flush snapshotting stores in 1588230740 at 1733714732629Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733714732629Flushing stores of hbase:meta,,1.1588230740 at 1733714732646 (+17 ms)Flushing 1588230740/info: creating writer at 1733714732646Flushing 1588230740/info: appending metadata at 1733714732664 (+18 ms)Flushing 1588230740/info: closing flushed file at 1733714732664Flushing 1588230740/ns: creating writer at 1733714732683 (+19 ms)Flushing 1588230740/ns: appending metadata at 1733714732698 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733714732698Flushing 1588230740/table: creating writer at 1733714732715 (+17 ms)Flushing 1588230740/table: appending metadata at 1733714732728 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733714732728Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@381910f6: reopening flushed file at 1733714732745 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35abf154: reopening flushed file at 1733714732765 (+20 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3be8087c: reopening flushed file at 1733714732773 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 154ms, sequenceid=11, compaction requested=false at 1733714732783 (+10 ms)Writing region close event to WAL at 1733714732796 (+13 ms)Running coprocessor post-close hooks at 1733714732801 (+5 ms)Closed at 1733714732801 2024-12-09T03:25:32,802 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:25:32,829 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,36295,1733714692747; all regions closed. 2024-12-09T03:25:32,829 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,829 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,829 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,830 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,830 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:32,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741889_1074 (size=825) 2024-12-09T03:25:32,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741889_1074 (size=825) 2024-12-09T03:25:33,125 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:25:33,125 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:25:33,247 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:25:33,286 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:25:33,287 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:25:33,814 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@702d8f7e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34579, datanodeUuid=498374d1-2135-432e-bd98-61bfb0301e60, infoPort=37983, infoSecurePort=0, ipcPort=34005, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741863_1046 to 127.0.0.1:45163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:34,024 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T03:25:34,024 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T03:25:34,124 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:25:34,174 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46ef10cc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741836_1012 to 127.0.0.1:45163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:34,174 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@765c29de[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741825_1001 to 127.0.0.1:45163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:35,174 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@46ef10cc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37671, datanodeUuid=83b865ab-3ea8-45a7-ae7b-86b463852204, infoPort=34251, infoSecurePort=0, ipcPort=37047, storageInfo=lv=-57;cid=testClusterID;nsid=681617659;c=1733714689922):Failed to transfer BP-1669423292-172.17.0.3-1733714689922:blk_1073741828_1004 to 127.0.0.1:45163 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:25:36,627 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 after 4002ms 2024-12-09T03:25:36,646 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta after 4001ms 2024-12-09T03:25:36,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T03:25:36,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:25:36,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:25:37,190 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@e0ba5ed {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1669423292-172.17.0.3-1733714689922:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:44369,null,null]) java.net.ConnectException: Call From 1617b0b1421f/172.17.0.3 to localhost:36631 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-09T03:25:37,625 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T03:25:37,627 DEBUG [RS:1;1617b0b1421f:33301 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs 2024-12-09T03:25:37,627 INFO [RS:1;1617b0b1421f:33301 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C33301%2C1733714694014:(num 1733714694252) 2024-12-09T03:25:37,627 DEBUG [RS:1;1617b0b1421f:33301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:37,627 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:25:37,629 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:25:37,629 INFO [RS:1;1617b0b1421f:33301 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33301 2024-12-09T03:25:37,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:37,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,688 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,33301,1733714694014 2024-12-09T03:25:37,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:25:37,691 INFO [RS:1;1617b0b1421f:33301 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:25:37,703 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,33301,1733714694014] 2024-12-09T03:25:37,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,706 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:37,713 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,33301,1733714694014 already deleted, retry=false 2024-12-09T03:25:37,713 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,33301,1733714694014 expired; onlineServers=1 2024-12-09T03:25:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:37,803 INFO [RS:1;1617b0b1421f:33301 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:25:37,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33301-0x100089ad0930002, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:37,803 INFO [RS:1;1617b0b1421f:33301 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,33301,1733714694014; zookeeper connection closed. 
2024-12-09T03:25:37,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ac894b0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ac894b0 2024-12-09T03:25:37,830 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T03:25:37,833 DEBUG [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs 2024-12-09T03:25:37,834 INFO [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C36295%2C1733714692747.meta:.meta(num 1733714732630) 2024-12-09T03:25:37,834 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:37,834 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:37,834 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:37,834 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:37,834 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741881_1065 (size=15140) 2024-12-09T03:25:37,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741881_1065 (size=15140) 2024-12-09T03:25:37,838 DEBUG [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs 2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C36295%2C1733714692747:(num 1733714732227) 2024-12-09T03:25:37,838 DEBUG [RS:0;1617b0b1421f:36295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:25:37,838 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:25:37,838 INFO [RS:0;1617b0b1421f:36295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36295 2024-12-09T03:25:37,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,36295,1733714692747 2024-12-09T03:25:37,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:25:37,861 INFO [RS:0;1617b0b1421f:36295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:25:37,862 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,36295,1733714692747] 2024-12-09T03:25:37,871 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,36295,1733714692747 already deleted, retry=false 2024-12-09T03:25:37,871 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,36295,1733714692747 expired; onlineServers=0 2024-12-09T03:25:37,871 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,34385,1733714692577' ***** 2024-12-09T03:25:37,871 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:25:37,871 INFO [M:0;1617b0b1421f:34385 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:25:37,872 DEBUG [M:0;1617b0b1421f:34385 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:25:37,872 DEBUG [M:0;1617b0b1421f:34385 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:25:37,872 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:25:37,872 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714693109 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714693109,5,FailOnTimeoutGroup] 2024-12-09T03:25:37,872 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714693110 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714693110,5,FailOnTimeoutGroup] 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:25:37,872 DEBUG [M:0;1617b0b1421f:34385 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:25:37,872 INFO [M:0;1617b0b1421f:34385 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:25:37,872 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:25:37,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:25:37,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:37,892 DEBUG [M:0;1617b0b1421f:34385 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-09T03:25:37,893 DEBUG [M:0;1617b0b1421f:34385 {}] master.ActiveMasterManager(353): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-09T03:25:37,893 INFO [M:0;1617b0b1421f:34385 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/.lastflushedseqids 2024-12-09T03:25:37,896 WARN [Thread-1081 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:37,896 WARN [Thread-1081 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741895_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:37,896 WARN [Thread-1081 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741895_1081 2024-12-09T03:25:37,897 WARN [Thread-1081 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741896_1082 (size=130) 2024-12-09T03:25:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741896_1082 (size=130) 2024-12-09T03:25:37,902 INFO [M:0;1617b0b1421f:34385 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:25:37,902 INFO [M:0;1617b0b1421f:34385 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:25:37,902 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:25:37,902 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:37,902 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:37,902 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:25:37,902 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:37,902 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-09T03:25:37,918 DEBUG [M:0;1617b0b1421f:34385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c678eafaf0674496b8eab737fed1921c is 82, key is hbase:meta,,1/info:regioninfo/1733714693851/Put/seqid=0 2024-12-09T03:25:37,920 WARN [Thread-1087 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:37,920 WARN [Thread-1087 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK], DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:37,920 WARN [Thread-1087 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741897_1083 2024-12-09T03:25:37,921 WARN [Thread-1087 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:37,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741898_1084 (size=5672) 2024-12-09T03:25:37,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741898_1084 (size=5672) 2024-12-09T03:25:37,926 INFO [M:0;1617b0b1421f:34385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c678eafaf0674496b8eab737fed1921c 2024-12-09T03:25:37,946 DEBUG [M:0;1617b0b1421f:34385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7da9a2c0f79643309f822c8258422338 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733714694536/Put/seqid=0 2024-12-09T03:25:37,948 WARN [Thread-1094 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:37,948 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41402 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:37,949 WARN [Thread-1094 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:37,949 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41402 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:37,949 WARN [Thread-1094 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085 2024-12-09T03:25:37,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41402 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41402 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:37,949 WARN [Thread-1094 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741900_1086 (size=6255) 2024-12-09T03:25:37,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741900_1086 (size=6255) 2024-12-09T03:25:37,958 INFO [M:0;1617b0b1421f:34385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7da9a2c0f79643309f822c8258422338 2024-12-09T03:25:37,964 INFO [M:0;1617b0b1421f:34385 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7da9a2c0f79643309f822c8258422338 2024-12-09T03:25:37,976 DEBUG [M:0;1617b0b1421f:34385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63771018d4b54f0b9e5dbcb5de42b642 is 69, key is 1617b0b1421f,33301,1733714694014/rs:state/1733714694092/Put/seqid=0 2024-12-09T03:25:37,979 WARN [Thread-1101 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:37,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41420 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4]'}, localName='127.0.0.1:37671', datanodeUuid='83b865ab-3ea8-45a7-ae7b-86b463852204', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:37,979 WARN [Thread-1101 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37671,DS-1c4085a6-aa75-4433-84db-cb0eb9c97f1a,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:37,979 WARN [Thread-1101 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087 2024-12-09T03:25:37,979 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41420 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:37,979 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:41420 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:37671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41420 dst: /127.0.0.1:37671 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:37,980 WARN [Thread-1101 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:37,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:37,982 INFO [RS:0;1617b0b1421f:36295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:25:37,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36295-0x100089ad0930001, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:37,982 INFO [RS:0;1617b0b1421f:36295 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,36295,1733714692747; zookeeper connection closed. 2024-12-09T03:25:37,982 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d48aa28 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d48aa28 2024-12-09T03:25:37,982 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T03:25:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741902_1088 (size=5224) 2024-12-09T03:25:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741902_1088 (size=5224) 2024-12-09T03:25:37,985 INFO [M:0;1617b0b1421f:34385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63771018d4b54f0b9e5dbcb5de42b642 2024-12-09T03:25:38,005 DEBUG [M:0;1617b0b1421f:34385 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24b0c4492fd947519d924f4208628070 is 52, key is load_balancer_on/state:d/1733714693997/Put/seqid=0 2024-12-09T03:25:38,008 WARN [Thread-1108 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45163 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:38,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:58160 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6]'}, localName='127.0.0.1:34579', datanodeUuid='498374d1-2135-432e-bd98-61bfb0301e60', xmitsInProgress=0}:Exception transferring block BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089 to mirror 127.0.0.1:45163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:38,008 WARN [Thread-1108 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34579,DS-c5ab5fbb-523f-48ac-ab77-17f49c867eac,DISK], DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK]) is bad. 2024-12-09T03:25:38,008 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:58160 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T03:25:38,008 WARN [Thread-1108 {}] hdfs.DataStreamer(1850): Abandoning BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089 2024-12-09T03:25:38,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1322309326_22 at /127.0.0.1:58160 [Receiving block BP-1669423292-172.17.0.3-1733714689922:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:34579:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58160 dst: /127.0.0.1:34579 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:38,009 WARN [Thread-1108 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45163,DS-92a73da9-2e46-4620-8ee5-2b07a1703c0a,DISK] 2024-12-09T03:25:38,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741904_1090 (size=5056) 2024-12-09T03:25:38,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741904_1090 (size=5056) 2024-12-09T03:25:38,024 INFO [M:0;1617b0b1421f:34385 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24b0c4492fd947519d924f4208628070 2024-12-09T03:25:38,030 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c678eafaf0674496b8eab737fed1921c as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c678eafaf0674496b8eab737fed1921c 2024-12-09T03:25:38,037 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c678eafaf0674496b8eab737fed1921c, entries=8, sequenceid=60, filesize=5.5 K 2024-12-09T03:25:38,038 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7da9a2c0f79643309f822c8258422338 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7da9a2c0f79643309f822c8258422338 2024-12-09T03:25:38,045 INFO [M:0;1617b0b1421f:34385 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7da9a2c0f79643309f822c8258422338 2024-12-09T03:25:38,045 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7da9a2c0f79643309f822c8258422338, entries=6, sequenceid=60, filesize=6.1 K 2024-12-09T03:25:38,046 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/63771018d4b54f0b9e5dbcb5de42b642 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/63771018d4b54f0b9e5dbcb5de42b642 2024-12-09T03:25:38,052 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/63771018d4b54f0b9e5dbcb5de42b642, entries=2, sequenceid=60, filesize=5.1 K 2024-12-09T03:25:38,053 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/24b0c4492fd947519d924f4208628070 as hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24b0c4492fd947519d924f4208628070 2024-12-09T03:25:38,058 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/24b0c4492fd947519d924f4208628070, entries=1, sequenceid=60, filesize=4.9 K 2024-12-09T03:25:38,059 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=60, compaction requested=false 2024-12-09T03:25:38,061 INFO [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:38,061 DEBUG [M:0;1617b0b1421f:34385 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714737902Disabling compacts and flushes for region at 1733714737902Disabling writes for close at 1733714737902Obtaining lock to block concurrent updates at 1733714737902Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714737902Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733714737903 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733714737903Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714737903Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714737918 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714737918Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714737932 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714737946 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714737946Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714737964 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714737976 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714737976Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714737990 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714738004 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714738004Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24c99076: reopening flushed file at 1733714738029 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75f59026: reopening flushed file at 1733714738037 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2aec888b: reopening flushed file at 1733714738046 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fa0b853: reopening flushed file at 1733714738052 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 157ms, sequenceid=60, compaction requested=false at 1733714738059 (+7 ms)Writing region close event to WAL at 1733714738061 (+2 ms)Closed at 1733714738061 2024-12-09T03:25:38,062 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:38,062 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:38,062 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:38,062 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:38,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:38,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37671 is added to blk_1073741880_1063 (size=1045) 2024-12-09T03:25:38,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34579 is added to blk_1073741880_1063 (size=1045) 2024-12-09T03:25:38,130 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/WALs/1617b0b1421f,34385,1733714692577/1617b0b1421f%2C34385%2C1733714692577.1733714692894 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/oldWALs/1617b0b1421f%2C34385%2C1733714692577.1733714692894 2024-12-09T03:25:38,133 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/MasterData/oldWALs/1617b0b1421f%2C34385%2C1733714692577.1733714692894 to hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/oldWALs/1617b0b1421f%2C34385%2C1733714692577.1733714692894$masterlocalwal$ 2024-12-09T03:25:38,134 INFO [M:0;1617b0b1421f:34385 {}] flush.MasterFlushTableProcedureManager(90): stop: server 
shutting down. 2024-12-09T03:25:38,134 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:25:38,134 INFO [M:0;1617b0b1421f:34385 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34385 2024-12-09T03:25:38,134 INFO [M:0;1617b0b1421f:34385 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:25:38,208 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:25:38,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,218 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:38,245 INFO [M:0;1617b0b1421f:34385 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:25:38,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:38,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34385-0x100089ad0930000, quorum=127.0.0.1:57669, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:25:38,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fc9ace2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:38,255 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c3131b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:38,255 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:38,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f8625f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:38,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f43bae2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:38,257 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:25:38,257 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:38,257 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:38,257 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@177d3d1c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44369,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:36631 , LocalHost:localPort 1617b0b1421f/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-12-09T03:25:38,257 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid 83b865ab-3ea8-45a7-ae7b-86b463852204) service to localhost/127.0.0.1:33407 2024-12-09T03:25:38,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data3/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:38,258 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data4/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:38,258 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@177d3d1c {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1669423292-172.17.0.3-1733714689922:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44369,null,null], DatanodeInfoWithStorage[127.0.0.1:37671,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1669423292-172.17.0.3-1733714689922 2024-12-09T03:25:38,259 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:38,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@550b8f7f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:38,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@177dd5a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:38,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:38,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e931df5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:38,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36543cc9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:38,272 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:38,272 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:25:38,272 WARN [BP-1669423292-172.17.0.3-1733714689922 heartbeating to localhost/127.0.0.1:33407 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1669423292-172.17.0.3-1733714689922 (Datanode Uuid 498374d1-2135-432e-bd98-61bfb0301e60) service to localhost/127.0.0.1:33407 2024-12-09T03:25:38,272 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:38,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data5/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:38,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/cluster_3c6318c4-5bf0-1c20-d8de-2a0554c12995/data/data6/current/BP-1669423292-172.17.0.3-1733714689922 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:38,273 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:38,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58a4fc41{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:25:38,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e92d0c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:38,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:38,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37564f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:38,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@281d64b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:38,288 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:25:38,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:25:38,329 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33407 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33407 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:46167 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f5670bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f5670bf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33407 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33407 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33407 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46167 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33407 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33407 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:33407 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33407 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC 
Parameter Sending Thread for localhost/127.0.0.1:33407 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33407 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=175 (was 144) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5675 (was 6313) 2024-12-09T03:25:38,337 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=175, ProcessCount=11, AvailableMemoryMB=5675 2024-12-09T03:25:38,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.log.dir so I do NOT create it in target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ed95a04d-e626-fc5a-df63-60d044f1f404/hadoop.tmp.dir so I do NOT create it in target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79, deleteOnExit=true 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/test.cache.data in system properties and HBase conf 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:25:38,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:25:38,339 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:25:38,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:25:38,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:25:38,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:25:38,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:25:38,353 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:25:38,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:38,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:25:38,688 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:38,694 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:38,695 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:38,696 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:38,696 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:38,696 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:38,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cc9fb65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:38,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@512c947f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:38,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73211bd8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-32819-hadoop-hdfs-3_4_1-tests_jar-_-any-2874225743984291958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:25:38,797 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34714045{HTTP/1.1, (http/1.1)}{localhost:32819} 2024-12-09T03:25:38,797 INFO [Time-limited test {}] server.Server(415): Started @159047ms 2024-12-09T03:25:38,814 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:25:39,062 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:39,066 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:39,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:39,067 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:39,067 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:25:39,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5074bb5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:39,067 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@798b58c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:39,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77432f4c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-35943-hadoop-hdfs-3_4_1-tests_jar-_-any-2090504502348661514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:39,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71e2a51d{HTTP/1.1, (http/1.1)}{localhost:35943} 2024-12-09T03:25:39,168 INFO [Time-limited test {}] server.Server(415): Started @159418ms 2024-12-09T03:25:39,169 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:39,202 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:39,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:39,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:39,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:39,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:39,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@156f9288{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:39,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a03251a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:39,309 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@604d132d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-41733-hadoop-hdfs-3_4_1-tests_jar-_-any-17302471683758912436/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:39,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6be7442f{HTTP/1.1, (http/1.1)}{localhost:41733} 2024-12-09T03:25:39,309 INFO [Time-limited test {}] server.Server(415): Started @159560ms 2024-12-09T03:25:39,311 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:39,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:39,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:25:40,301 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data2/current/BP-1833210865-172.17.0.3-1733714738365/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:40,301 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data1/current/BP-1833210865-172.17.0.3-1733714738365/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:40,315 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:25:40,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x305c1a63873de76c with lease ID 0x21c5436d2a3c3ab9: Processing first storage report for DS-5afc482f-7096-4e90-89ad-18145eb98dfd from datanode DatanodeRegistration(127.0.0.1:39969, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=46871, infoSecurePort=0, ipcPort=41629, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365) 2024-12-09T03:25:40,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x305c1a63873de76c with lease ID 0x21c5436d2a3c3ab9: from storage DS-5afc482f-7096-4e90-89ad-18145eb98dfd node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=46871, infoSecurePort=0, ipcPort=41629, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:40,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x305c1a63873de76c with lease ID 0x21c5436d2a3c3ab9: Processing first storage report for DS-f0c74e4f-48e6-4ba2-a972-99bb18815a7f from datanode DatanodeRegistration(127.0.0.1:39969, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=46871, infoSecurePort=0, ipcPort=41629, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365) 2024-12-09T03:25:40,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x305c1a63873de76c with lease ID 0x21c5436d2a3c3ab9: from storage DS-f0c74e4f-48e6-4ba2-a972-99bb18815a7f node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=46871, infoSecurePort=0, ipcPort=41629, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:40,443 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data3/current/BP-1833210865-172.17.0.3-1733714738365/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:40,443 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data4/current/BP-1833210865-172.17.0.3-1733714738365/current, will proceed with Du for space computation calculation, 2024-12-09T03:25:40,458 WARN [Thread-1192 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:25:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaa47312144b501d with lease ID 0x21c5436d2a3c3aba: Processing first storage report for DS-3ce81d50-f164-4be3-9411-b131b58fc4a8 from datanode DatanodeRegistration(127.0.0.1:33755, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=44973, infoSecurePort=0, ipcPort=35977, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365) 2024-12-09T03:25:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaa47312144b501d with lease ID 0x21c5436d2a3c3aba: from storage DS-3ce81d50-f164-4be3-9411-b131b58fc4a8 node DatanodeRegistration(127.0.0.1:33755, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=44973, infoSecurePort=0, ipcPort=35977, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbaa47312144b501d with lease ID 0x21c5436d2a3c3aba: Processing first storage report for DS-299814fd-e3b5-4f7d-9e40-e74d73ab7889 from datanode DatanodeRegistration(127.0.0.1:33755, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=44973, infoSecurePort=0, ipcPort=35977, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365) 2024-12-09T03:25:40,460 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbaa47312144b501d with lease ID 0x21c5436d2a3c3aba: from storage DS-299814fd-e3b5-4f7d-9e40-e74d73ab7889 node DatanodeRegistration(127.0.0.1:33755, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=44973, infoSecurePort=0, ipcPort=35977, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:40,542 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f 2024-12-09T03:25:40,545 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/zookeeper_0, clientPort=50970, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:25:40,549 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50970 2024-12-09T03:25:40,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,551 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:25:40,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:25:40,562 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9 with version=8 2024-12-09T03:25:40,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:25:40,565 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:25:40,565 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:25:40,566 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:25:40,567 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:32973 2024-12-09T03:25:40,569 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32973 connecting to ZooKeeper ensemble=127.0.0.1:50970 2024-12-09T03:25:40,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329730x0, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-09T03:25:40,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32973-0x100089b8c070000 connected 2024-12-09T03:25:40,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:40,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:40,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,718 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,721 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:25:40,721 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9, hbase.cluster.distributed=false 2024-12-09T03:25:40,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:25:40,728 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32973 2024-12-09T03:25:40,731 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32973 2024-12-09T03:25:40,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32973 2024-12-09T03:25:40,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32973 2024-12-09T03:25:40,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32973 2024-12-09T03:25:40,753 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:25:40,754 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:25:40,755 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35529 2024-12-09T03:25:40,757 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35529 connecting to ZooKeeper ensemble=127.0.0.1:50970 2024-12-09T03:25:40,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,759 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:40,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355290x0, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:25:40,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:355290x0, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:25:40,770 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35529-0x100089b8c070001 connected 2024-12-09T03:25:40,770 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:25:40,777 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:25:40,777 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:25:40,778 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:25:40,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35529 2024-12-09T03:25:40,780 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35529 2024-12-09T03:25:40,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35529 2024-12-09T03:25:40,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35529 2024-12-09T03:25:40,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35529 2024-12-09T03:25:40,793 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:32973 2024-12-09T03:25:40,793 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,32973,1733714740564 2024-12-09T03:25:40,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:25:40,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:25:40,802 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1617b0b1421f,32973,1733714740564 2024-12-09T03:25:40,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:40,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:25:40,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:40,812 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:25:40,812 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,32973,1733714740564 from backup master directory 2024-12-09T03:25:40,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/1617b0b1421f,32973,1733714740564 2024-12-09T03:25:40,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:25:40,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:25:40,822 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:25:40,822 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,32973,1733714740564 2024-12-09T03:25:40,827 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/hbase.id] with ID: c1f17dfa-ca71-4eba-8da9-c9d809ab6208 2024-12-09T03:25:40,827 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/.tmp/hbase.id 2024-12-09T03:25:40,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:25:40,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:25:41,233 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/.tmp/hbase.id]:[hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/hbase.id] 2024-12-09T03:25:41,247 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:41,247 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:25:41,248 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T03:25:41,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:25:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:25:41,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:41,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:25:41,728 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:25:41,729 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:25:41,729 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:25:41,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:25:41,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:25:41,744 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
{NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store 2024-12-09T03:25:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:25:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:25:41,752 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:41,752 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:25:41,752 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:41,753 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:41,753 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:25:41,753 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:25:41,753 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:25:41,753 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714741752Disabling compacts and flushes for region at 1733714741752Disabling writes for close at 1733714741753 (+1 ms)Writing region close event to WAL at 1733714741753Closed at 1733714741753 2024-12-09T03:25:41,754 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/.initializing 2024-12-09T03:25:41,754 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564 2024-12-09T03:25:41,756 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C32973%2C1733714740564, suffix=, logDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564, archiveDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/oldWALs, maxLogs=10 2024-12-09T03:25:41,757 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C32973%2C1733714740564.1733714741756 2024-12-09T03:25:41,764 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 2024-12-09T03:25:41,773 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46871:46871),(127.0.0.1/127.0.0.1:44973:44973)] 2024-12-09T03:25:41,775 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:25:41,776 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:41,776 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,776 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:25:41,779 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:41,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:41,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:25:41,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:41,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:25:41,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:25:41,783 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:41,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:25:41,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:25:41,786 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:41,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:25:41,787 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,787 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,788 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,789 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,789 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,789 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:25:41,790 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:25:41,792 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:25:41,793 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722457, jitterRate=-0.08134844899177551}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:25:41,793 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714741776Initializing all the Stores at 1733714741777 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714741777Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714741777Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714741777Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714741777Cleaning up temporary data from old regions at 1733714741789 (+12 ms)Region opened successfully at 1733714741793 (+4 ms) 2024-12-09T03:25:41,793 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:25:41,796 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26fa0ad3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:25:41,797 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:25:41,797 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:25:41,797 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:25:41,798 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:25:41,798 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:25:41,799 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:25:41,799 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:25:41,801 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:25:41,802 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:25:41,843 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:25:41,843 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:25:41,844 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:25:41,853 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:25:41,854 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:25:41,855 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:25:41,864 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:25:41,865 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:25:41,874 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:25:41,877 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:25:41,885 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:25:41,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:25:41,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:25:41,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,896 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,32973,1733714740564, sessionid=0x100089b8c070000, setting cluster-up flag (Was=false) 2024-12-09T03:25:41,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,948 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:25:41,950 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,32973,1733714740564 2024-12-09T03:25:41,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:41,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:42,001 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:25:42,002 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,32973,1733714740564 2024-12-09T03:25:42,004 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:25:42,006 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:25:42,006 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:25:42,006 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:25:42,006 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,32973,1733714740564 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:25:42,008 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T03:25:42,009 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714772009 2024-12-09T03:25:42,009 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:25:42,009 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:25:42,009 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:25:42,010 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:25:42,010 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:25:42,010 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:25:42,010 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:25:42,010 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:25:42,011 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,011 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:25:42,017 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,017 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:25:42,017 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:25:42,017 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:25:42,018 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:25:42,018 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:25:42,018 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714742018,5,FailOnTimeoutGroup] 2024-12-09T03:25:42,019 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714742018,5,FailOnTimeoutGroup] 2024-12-09T03:25:42,019 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,019 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:25:42,019 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,019 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T03:25:42,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:25:42,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:25:42,023 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:25:42,023 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9 2024-12-09T03:25:42,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:25:42,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:25:42,032 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:42,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:25:42,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:25:42,035 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:25:42,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:25:42,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:25:42,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:25:42,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:25:42,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:25:42,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,041 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:25:42,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740 2024-12-09T03:25:42,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740 2024-12-09T03:25:42,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:25:42,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:25:42,044 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T03:25:42,046 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:25:42,053 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:25:42,054 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827536, jitterRate=0.052267611026763916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:25:42,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714742032Initializing all the Stores at 1733714742033 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742033Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742033Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714742033Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742033Cleaning up temporary data from old regions at 1733714742044 (+11 ms)Region opened successfully at 1733714742054 (+10 ms) 2024-12-09T03:25:42,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:25:42,054 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:25:42,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:25:42,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:25:42,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:25:42,055 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:25:42,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714742054Disabling compacts and flushes for region at 1733714742054Disabling writes for close at 1733714742054Writing region 
close event to WAL at 1733714742055 (+1 ms)Closed at 1733714742055 2024-12-09T03:25:42,056 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:25:42,056 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:25:42,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:25:42,057 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:25:42,058 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:25:42,085 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(746): ClusterId : c1f17dfa-ca71-4eba-8da9-c9d809ab6208 2024-12-09T03:25:42,085 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:25:42,096 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:25:42,096 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:25:42,107 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:25:42,108 DEBUG [RS:0;1617b0b1421f:35529 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67d03189, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:25:42,118 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:35529 2024-12-09T03:25:42,118 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:25:42,118 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:25:42,118 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:25:42,119 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,32973,1733714740564 with port=35529, startcode=1733714740753 2024-12-09T03:25:42,119 DEBUG [RS:0;1617b0b1421f:35529 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:25:42,121 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57065, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:25:42,122 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32973 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,122 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32973 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,123 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9 2024-12-09T03:25:42,123 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33821 2024-12-09T03:25:42,123 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:25:42,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:25:42,135 DEBUG [RS:0;1617b0b1421f:35529 {}] zookeeper.ZKUtil(111): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,136 WARN [RS:0;1617b0b1421f:35529 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:25:42,136 INFO [RS:0;1617b0b1421f:35529 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:25:42,136 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,136 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,35529,1733714740753] 2024-12-09T03:25:42,139 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:25:42,145 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:25:42,145 INFO [RS:0;1617b0b1421f:35529 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:25:42,145 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T03:25:42,145 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:25:42,146 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:25:42,146 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:25:42,147 DEBUG [RS:0;1617b0b1421f:35529 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:25:42,148 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T03:25:42,149 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,149 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,149 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,149 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,149 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,35529,1733714740753-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:25:42,170 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:25:42,170 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,35529,1733714740753-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,170 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,170 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.Replication(171): 1617b0b1421f,35529,1733714740753 started 2024-12-09T03:25:42,191 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,191 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,35529,1733714740753, RpcServer on 1617b0b1421f/172.17.0.3:35529, sessionid=0x100089b8c070001 2024-12-09T03:25:42,192 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:25:42,192 DEBUG [RS:0;1617b0b1421f:35529 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,192 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,35529,1733714740753' 2024-12-09T03:25:42,192 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:25:42,192 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:25:42,193 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:25:42,193 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:25:42,193 DEBUG [RS:0;1617b0b1421f:35529 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,193 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,35529,1733714740753' 2024-12-09T03:25:42,193 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:25:42,194 DEBUG 
[RS:0;1617b0b1421f:35529 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:25:42,194 DEBUG [RS:0;1617b0b1421f:35529 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:25:42,194 INFO [RS:0;1617b0b1421f:35529 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:25:42,194 INFO [RS:0;1617b0b1421f:35529 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:25:42,209 WARN [1617b0b1421f:32973 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:25:42,297 INFO [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C35529%2C1733714740753, suffix=, logDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753, archiveDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs, maxLogs=32 2024-12-09T03:25:42,298 INFO [RS:0;1617b0b1421f:35529 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:25:42,305 INFO [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:25:42,311 DEBUG [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46871:46871),(127.0.0.1/127.0.0.1:44973:44973)] 2024-12-09T03:25:42,459 DEBUG [1617b0b1421f:32973 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:25:42,460 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,461 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,35529,1733714740753, state=OPENING 2024-12-09T03:25:42,472 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:25:42,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:42,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:25:42,483 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:25:42,483 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:25:42,483 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:25:42,483 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,35529,1733714740753}] 2024-12-09T03:25:42,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:25:42,637 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:25:42,639 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60731, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:25:42,642 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:25:42,642 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:25:42,644 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C35529%2C1733714740753.meta, suffix=.meta, logDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753, archiveDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs, maxLogs=32 2024-12-09T03:25:42,645 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta 2024-12-09T03:25:42,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:42,650 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta 2024-12-09T03:25:42,651 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46871:46871),(127.0.0.1/127.0.0.1:44973:44973)] 2024-12-09T03:25:42,652 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:25:42,653 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:25:42,653 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:25:42,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:25:42,656 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:25:42,656 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:25:42,658 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:25:42,658 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,659 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:25:42,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:25:42,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:25:42,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:25:42,662 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:25:42,662 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T03:25:42,663 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:25:42,664 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740 2024-12-09T03:25:42,665 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740 2024-12-09T03:25:42,666 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:25:42,666 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:25:42,667 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:25:42,669 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:25:42,670 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865276, jitterRate=0.10025608539581299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:25:42,670 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:25:42,671 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714742653Writing region info on filesystem at 1733714742653Initializing all the Stores at 1733714742654 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742654Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742655 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714742655Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714742655Cleaning up temporary data from old regions at 1733714742666 (+11 ms)Running coprocessor post-open hooks at 1733714742670 (+4 ms)Region opened successfully at 1733714742671 (+1 ms) 2024-12-09T03:25:42,672 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714742637 2024-12-09T03:25:42,674 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:25:42,675 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:25:42,676 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,677 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,35529,1733714740753, state=OPEN 2024-12-09T03:25:42,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:25:42,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:25:42,767 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:42,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:25:42,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:25:42,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:25:42,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,35529,1733714740753 in 284 msec 2024-12-09T03:25:42,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:25:42,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 714 msec 2024-12-09T03:25:42,774 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:25:42,774 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:25:42,776 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:25:42,776 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,35529,1733714740753, seqNum=-1] 2024-12-09T03:25:42,776 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:25:42,778 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54457, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:25:42,785 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 779 msec 2024-12-09T03:25:42,785 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714742785, completionTime=-1 2024-12-09T03:25:42,785 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:25:42,785 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714802787 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714862787 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:32973, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:25:42,787 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,788 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:25:42,789 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.969sec 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:25:42,791 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:25:42,792 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:25:42,792 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:25:42,794 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:25:42,795 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:25:42,795 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,32973,1733714740564-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:25:42,887 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fede049, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:25:42,887 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,32973,-1 for getting cluster id 2024-12-09T03:25:42,887 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:25:42,890 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c1f17dfa-ca71-4eba-8da9-c9d809ab6208' 2024-12-09T03:25:42,890 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:25:42,890 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c1f17dfa-ca71-4eba-8da9-c9d809ab6208" 2024-12-09T03:25:42,891 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@266c230e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:25:42,891 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,32973,-1] 2024-12-09T03:25:42,891 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:25:42,891 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:25:42,893 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:25:42,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48cd4967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:25:42,895 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:25:42,896 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,35529,1733714740753, seqNum=-1] 2024-12-09T03:25:42,896 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:25:42,898 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:25:42,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,32973,1733714740564 2024-12-09T03:25:42,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:25:42,904 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:25:42,904 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-09T03:25:42,904 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-09T03:25:42,905 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:25:42,906 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 1617b0b1421f,32973,1733714740564 2024-12-09T03:25:42,906 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5a4bb7ab 2024-12-09T03:25:42,906 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:25:42,910 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33764, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:25:42,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T03:25:42,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T03:25:42,911 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:25:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T03:25:42,914 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:25:42,914 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:42,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-09T03:25:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:25:42,915 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:25:42,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741835_1011 (size=395) 2024-12-09T03:25:42,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741835_1011 (size=395) 2024-12-09T03:25:42,933 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7e0060bbc5fe43d1c98296f6cac5153c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9 2024-12-09T03:25:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33755 is added to blk_1073741836_1012 (size=78) 2024-12-09T03:25:42,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741836_1012 (size=78) 2024-12-09T03:25:42,943 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:42,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 7e0060bbc5fe43d1c98296f6cac5153c, disabling compactions & flushes 2024-12-09T03:25:42,943 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:42,943 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:42,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. after waiting 0 ms 2024-12-09T03:25:42,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:42,944 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:42,944 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7e0060bbc5fe43d1c98296f6cac5153c: Waiting for close lock at 1733714742943Disabling compacts and flushes for region at 1733714742943Disabling writes for close at 1733714742944 (+1 ms)Writing region close event to WAL at 1733714742944Closed at 1733714742944 2024-12-09T03:25:42,946 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:25:42,946 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733714742946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714742946"}]},"ts":"1733714742946"} 2024-12-09T03:25:42,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T03:25:42,950 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:25:42,950 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714742950"}]},"ts":"1733714742950"} 2024-12-09T03:25:42,953 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T03:25:42,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0060bbc5fe43d1c98296f6cac5153c, ASSIGN}] 2024-12-09T03:25:42,954 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0060bbc5fe43d1c98296f6cac5153c, ASSIGN 2024-12-09T03:25:42,955 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0060bbc5fe43d1c98296f6cac5153c, ASSIGN; state=OFFLINE, location=1617b0b1421f,35529,1733714740753; forceNewPlan=false, retain=false 2024-12-09T03:25:43,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7e0060bbc5fe43d1c98296f6cac5153c, regionState=OPENING, regionLocation=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:43,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0060bbc5fe43d1c98296f6cac5153c, ASSIGN because future has completed 2024-12-09T03:25:43,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e0060bbc5fe43d1c98296f6cac5153c, server=1617b0b1421f,35529,1733714740753}] 2024-12-09T03:25:43,274 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 
2024-12-09T03:25:43,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7e0060bbc5fe43d1c98296f6cac5153c, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:25:43,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:25:43,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,277 INFO [StoreOpener-7e0060bbc5fe43d1c98296f6cac5153c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,279 INFO [StoreOpener-7e0060bbc5fe43d1c98296f6cac5153c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e0060bbc5fe43d1c98296f6cac5153c columnFamilyName info 2024-12-09T03:25:43,279 DEBUG [StoreOpener-7e0060bbc5fe43d1c98296f6cac5153c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:25:43,280 INFO [StoreOpener-7e0060bbc5fe43d1c98296f6cac5153c-1 {}] regionserver.HStore(327): Store=7e0060bbc5fe43d1c98296f6cac5153c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:25:43,280 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,281 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,281 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,282 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,282 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,284 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,287 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:25:43,288 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7e0060bbc5fe43d1c98296f6cac5153c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722771, jitterRate=-0.08094953000545502}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:25:43,288 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:25:43,289 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7e0060bbc5fe43d1c98296f6cac5153c: Running coprocessor pre-open hook at 1733714743276Writing region info on filesystem at 1733714743276Initializing all the Stores at 1733714743277 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714743277Cleaning up temporary data from old regions at 1733714743282 (+5 ms)Running coprocessor post-open hooks at 1733714743288 (+6 ms)Region opened successfully at 1733714743289 (+1 ms) 2024-12-09T03:25:43,290 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c., pid=6, masterSystemTime=1733714743269 2024-12-09T03:25:43,293 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:43,293 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:43,294 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7e0060bbc5fe43d1c98296f6cac5153c, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,35529,1733714740753 2024-12-09T03:25:43,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e0060bbc5fe43d1c98296f6cac5153c, server=1617b0b1421f,35529,1733714740753 because future has completed 2024-12-09T03:25:43,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:25:43,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7e0060bbc5fe43d1c98296f6cac5153c, server=1617b0b1421f,35529,1733714740753 in 194 msec 2024-12-09T03:25:43,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:25:43,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7e0060bbc5fe43d1c98296f6cac5153c, ASSIGN in 353 msec 2024-12-09T03:25:43,312 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:25:43,313 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714743312"}]},"ts":"1733714743312"} 2024-12-09T03:25:43,316 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T03:25:43,318 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:25:43,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 407 msec 2024-12-09T03:25:43,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:43,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:44,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:44,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:45,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:45,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:46,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:46,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:46,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:25:46,746 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T03:25:46,748 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T03:25:46,748 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-09T03:25:46,749 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:25:46,750 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T03:25:47,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:47,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:48,158 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:25:48,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:25:48,183 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:25:48,183 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-09T03:25:48,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:48,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:49,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:49,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:50,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:50,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:51,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:51,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:52,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:52,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32973 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:25:52,942 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-09T03:25:52,943 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-09T03:25:52,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T03:25:52,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:25:52,953 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c., hostname=1617b0b1421f,35529,1733714740753, seqNum=2] 2024-12-09T03:25:53,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:53,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:54,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:54,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:54,956 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:25:54,957 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:54,957 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:54,957 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:54,958 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK], DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]) is bad. 
2024-12-09T03:25:54,958 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK], DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]) is bad. 2024-12-09T03:25:54,958 WARN [PacketResponder: BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33755] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,958 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK], DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33755,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]) is bad. 2024-12-09T03:25:54,958 WARN [PacketResponder: BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33755] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1931851033_22 at /127.0.0.1:46574 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46574 dst: /127.0.0.1:39969 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:46602 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46602 dst: /127.0.0.1:39969 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,959 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:46628 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46628 dst: /127.0.0.1:39969 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,960 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:45198 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33755:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45198 dst: /127.0.0.1:33755 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,960 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1931851033_22 at /127.0.0.1:45148 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33755:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45148 dst: /127.0.0.1:33755 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:54,960 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:45190 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33755:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45190 dst: /127.0.0.1:33755 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:55,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@604d132d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:55,020 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6be7442f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:55,020 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:55,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a03251a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:55,020 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@156f9288{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:55,022 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:55,022 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:25:55,022 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 1cfda00a-8060-4140-9a0f-159f6b31e046) service to localhost/127.0.0.1:33821 2024-12-09T03:25:55,022 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:55,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data3/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:55,023 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data4/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:55,023 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:55,034 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:55,037 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:55,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:55,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:55,038 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:25:55,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2877a875{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:55,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b6fee43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:55,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10b3b4b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-37253-hadoop-hdfs-3_4_1-tests_jar-_-any-18390930192421092205/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:55,127 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35bfd15a{HTTP/1.1, 
(http/1.1)}{localhost:37253} 2024-12-09T03:25:55,127 INFO [Time-limited test {}] server.Server(415): Started @175377ms 2024-12-09T03:25:55,128 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:55,149 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:55,149 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:55,149 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:55,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:57952 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57952 dst: /127.0.0.1:39969 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:55,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1931851033_22 at /127.0.0.1:57936 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57936 dst: /127.0.0.1:39969 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:25:55,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:57960 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57960 dst: /127.0.0.1:39969 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:25:55,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77432f4c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:55,155 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71e2a51d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:25:55,155 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:25:55,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@798b58c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:25:55,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5074bb5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:25:55,156 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:25:55,156 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:25:55,156 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:25:55,156 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 6a965f7b-9083-469a-be74-ef1f38191bc1) service to localhost/127.0.0.1:33821 2024-12-09T03:25:55,157 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data1/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:55,157 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data2/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:25:55,157 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:25:55,163 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:25:55,166 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:25:55,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:25:55,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:25:55,167 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:25:55,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eae9216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:25:55,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7006c52a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:25:55,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74025bad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-33249-hadoop-hdfs-3_4_1-tests_jar-_-any-3962490260733708548/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:25:55,255 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@263f60f8{HTTP/1.1, (http/1.1)}{localhost:33249} 2024-12-09T03:25:55,255 INFO [Time-limited test {}] server.Server(415): Started @175505ms 2024-12-09T03:25:55,256 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:25:55,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:55,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:55,768 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:25:55,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1cf472c27d458f with lease ID 0x21c5436d2a3c3abb: from storage DS-3ce81d50-f164-4be3-9411-b131b58fc4a8 node DatanodeRegistration(127.0.0.1:37271, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=38437, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:55,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1cf472c27d458f with lease ID 0x21c5436d2a3c3abb: from storage DS-299814fd-e3b5-4f7d-9e40-e74d73ab7889 node DatanodeRegistration(127.0.0.1:37271, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=38437, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:55,869 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:25:55,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ac640a1c65aae1 with lease ID 0x21c5436d2a3c3abc: from storage DS-5afc482f-7096-4e90-89ad-18145eb98dfd node DatanodeRegistration(127.0.0.1:36039, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=43247, infoSecurePort=0, ipcPort=37949, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:55,871 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ac640a1c65aae1 with lease ID 0x21c5436d2a3c3abc: from storage DS-f0c74e4f-48e6-4ba2-a972-99bb18815a7f node DatanodeRegistration(127.0.0.1:36039, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=43247, infoSecurePort=0, ipcPort=37949, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:25:56,275 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T03:25:56,280 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T03:25:56,281 ERROR [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:56,281 WARN [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:56,281 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C35529%2C1733714740753:(num 1733714742297) roll requested 2024-12-09T03:25:56,282 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:25:56,288 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 newFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:25:56,288 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:56,288 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:56,289 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:56,289 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:56,289 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:25:56,289 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:25:56,289 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:25:56,290 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:25:56,290 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:25:56,290 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38437:38437),(127.0.0.1/127.0.0.1:43247:43247)] 2024-12-09T03:25:56,290 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 is not closed yet, will try archiving it next time 2024-12-09T03:25:56,290 WARN [IPC Server handler 1 on default port 33821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-12-09T03:25:56,291 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 after 1ms 2024-12-09T03:25:56,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:56,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:25:57,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:57,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:57,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T03:25:58,298 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T03:25:58,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:58,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:59,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:25:59,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:00,293 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 after 4002ms 2024-12-09T03:26:00,303 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:36039,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:00,303 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37271,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK], DatanodeInfoWithStorage[127.0.0.1:36039,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36039,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]) is bad. 
2024-12-09T03:26:00,303 WARN [PacketResponder: BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36039] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:26:00,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:39202 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37271:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39202 dst: /127.0.0.1:37271 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:26:00,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:47492 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47492 dst: /127.0.0.1:36039 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:26:00,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74025bad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:00,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@263f60f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:26:00,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:26:00,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7006c52a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:26:00,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eae9216{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:26:00,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:26:00,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:26:00,337 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:26:00,337 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 6a965f7b-9083-469a-be74-ef1f38191bc1) service to localhost/127.0.0.1:33821 2024-12-09T03:26:00,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data1/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:00,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data2/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:00,338 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:26:00,354 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:00,357 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:26:00,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:26:00,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:26:00,361 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:26:00,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22ae57aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:26:00,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16e84d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:26:00,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@727fa385{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-45297-hadoop-hdfs-3_4_1-tests_jar-_-any-17920304407037572731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:00,466 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38762891{HTTP/1.1, (http/1.1)}{localhost:45297} 2024-12-09T03:26:00,466 INFO [Time-limited test {}] server.Server(415): Started @180716ms 2024-12-09T03:26:00,467 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:26:00,488 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:00,488 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2075705118_22 at /127.0.0.1:56232 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37271:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56232 dst: /127.0.0.1:37271 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T03:26:00,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10b3b4b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:00,497 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35bfd15a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:26:00,497 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:26:00,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b6fee43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:26:00,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2877a875{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:26:00,498 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:26:00,498 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 1cfda00a-8060-4140-9a0f-159f6b31e046) service to localhost/127.0.0.1:33821 2024-12-09T03:26:00,498 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T03:26:00,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:26:00,498 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data3/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:00,499 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data4/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:00,499 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:26:00,513 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:00,516 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:26:00,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:26:00,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:26:00,518 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:26:00,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ce3c2a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:26:00,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5757dea9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:26:00,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6edb8d2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/java.io.tmpdir/jetty-localhost-36241-hadoop-hdfs-3_4_1-tests_jar-_-any-14512437145471806841/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:00,607 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ab179b2{HTTP/1.1, (http/1.1)}{localhost:36241} 2024-12-09T03:26:00,607 INFO [Time-limited test {}] server.Server(415): Started @180857ms 2024-12-09T03:26:00,608 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:26:00,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:00,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:00,952 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T03:26:00,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4320d6f696fb77e4 with lease ID 0x21c5436d2a3c3abd: from storage DS-5afc482f-7096-4e90-89ad-18145eb98dfd node DatanodeRegistration(127.0.0.1:43799, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=35793, infoSecurePort=0, ipcPort=43785, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:26:00,955 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4320d6f696fb77e4 with lease ID 0x21c5436d2a3c3abd: from storage DS-f0c74e4f-48e6-4ba2-a972-99bb18815a7f node DatanodeRegistration(127.0.0.1:43799, datanodeUuid=6a965f7b-9083-469a-be74-ef1f38191bc1, infoPort=35793, infoSecurePort=0, ipcPort=43785, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:01,125 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:26:01,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8810f031b58d1d82 with lease ID 0x21c5436d2a3c3abe: from storage DS-3ce81d50-f164-4be3-9411-b131b58fc4a8 node DatanodeRegistration(127.0.0.1:44155, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=40991, infoSecurePort=0, ipcPort=34081, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:01,128 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8810f031b58d1d82 with lease ID 0x21c5436d2a3c3abe: from storage DS-299814fd-e3b5-4f7d-9e40-e74d73ab7889 node DatanodeRegistration(127.0.0.1:44155, datanodeUuid=1cfda00a-8060-4140-9a0f-159f6b31e046, infoPort=40991, infoSecurePort=0, ipcPort=34081, storageInfo=lv=-57;cid=testClusterID;nsid=662381745;c=1733714738365), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:01,624 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-09T03:26:01,626 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T03:26:01,628 ERROR [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37271,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:26:01,628 WARN [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37271,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:01,629 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C35529%2C1733714740753:(num 1733714756282) roll requested 2024-12-09T03:26:01,629 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:01,645 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 newFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:01,645 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:01,645 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:01,645 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:01,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:01,646 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:01,646 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:01,646 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37271,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:26:01,646 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37271,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:01,646 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:26:01,647 WARN [IPC Server handler 4 on default port 33821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-09T03:26:01,647 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 after 1ms 2024-12-09T03:26:01,647 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40991:40991),(127.0.0.1/127.0.0.1:35793:35793)] 2024-12-09T03:26:01,647 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 is not closed yet, will try archiving it next time 2024-12-09T03:26:01,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:01,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:02,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:02,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:03,649 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:03,658 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 newFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:03,658 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:03,658 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:03,658 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:03,658 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:03,658 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:03,658 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:03,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:03,660 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35793:35793),(127.0.0.1/127.0.0.1:40991:40991)] 2024-12-09T03:26:03,660 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 is not closed yet, will try archiving it next time 2024-12-09T03:26:03,660 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 is not closed yet, will try archiving it next time 2024-12-09T03:26:03,660 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:26:03,660 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:26:03,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741838_1019 (size=1264) 2024-12-09T03:26:03,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741838_1019 (size=1264) 2024-12-09T03:26:03,661 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 after 1ms 2024-12-09T03:26:03,661 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:26:03,661 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 is not closed yet, will try archiving it next time 2024-12-09T03:26:03,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:03,670 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733714743289/Put/vlen=218/seqid=0] 2024-12-09T03:26:03,670 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733714752954/Put/vlen=1045/seqid=0] 2024-12-09T03:26:03,670 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714742297 2024-12-09T03:26:03,670 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:26:03,670 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:26:03,671 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 after 1ms 2024-12-09T03:26:03,671 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:26:03,677 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733714756281/Put/vlen=1045/seqid=0] 2024-12-09T03:26:03,678 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733714758300/Put/vlen=1045/seqid=0] 2024-12-09T03:26:03,678 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 2024-12-09T03:26:03,678 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:03,678 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:03,678 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 after 0ms 2024-12-09T03:26:03,678 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714761629 2024-12-09T03:26:03,682 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733714761628/Put/vlen=1045/seqid=0] 2024-12-09T03:26:03,682 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:03,682 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:03,683 WARN [IPC Server handler 2 on default port 33821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-09T03:26:03,683 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 after 1ms 2024-12-09T03:26:03,955 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T03:26:04,132 WARN [ResponseProcessor for block BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:26:04,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1931851033_22 at /127.0.0.1:33198 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33198 dst: /127.0.0.1:43799 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43799 remote=/127.0.0.1:33198]. Total timeout mills is 60000, 59526 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:26:04,132 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 block BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43799,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK], DatanodeInfoWithStorage[127.0.0.1:44155,DS-3ce81d50-f164-4be3-9411-b131b58fc4a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43799,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]) is bad. 
2024-12-09T03:26:04,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1931851033_22 at /127.0.0.1:57558 [Receiving block BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44155:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57558 dst: /127.0.0.1:44155 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:26:04,133 WARN [DataStreamer for file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 block BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:04,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741839_1022 (size=85) 2024-12-09T03:26:04,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:04,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:05,648 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714756282 after 4002ms 2024-12-09T03:26:05,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:05,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:06,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:06,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:07,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:07,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T03:26:07,684 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 after 4002ms
2024-12-09T03:26:07,684 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649
2024-12-09T03:26:07,690 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649
2024-12-09T03:26:07,690 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB
2024-12-09T03:26:07,691 ERROR [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T03:26:07,691 WARN [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
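The failed append above is what makes the log roller request a new WAL in the entries that follow. A WAL roll can also be requested explicitly from a client, which is how rolling is typically driven in tests like this one. A minimal sketch, assuming a reachable cluster and the HBase client on the classpath; configuration, variable names, and the per-server loop are illustrative and not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; a mini-cluster test would use
        // the test utility's configuration instead (assumption for this sketch).
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask each region server to close its current WAL writer and open a new one,
          // the same operation the roller logs as "roll requested" / "Rolled WAL".
          for (ServerName sn : admin.getRegionServers()) {
            admin.rollWALWriter(sn);
          }
        }
      }
    }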
2024-12-09T03:26:07,691 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C35529%2C1733714740753.meta:.meta(num 1733714742645) roll requested
2024-12-09T03:26:07,692 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.meta.1733714767692.meta
2024-12-09T03:26:07,697 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T03:26:07,697 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T03:26:07,697 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T03:26:07,697 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T03:26:07,697 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T03:26:07,697 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714767692.meta
2024-12-09T03:26:07,699 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T03:26:07,699 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
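Because the old writer could not be closed cleanly, the next entries show the Close-WAL-Writer thread handing the file to RecoverLeaseFSUtils, which asks the NameNode to recover the lease and then polls until the file is reported closed (the reflective isFileClosed call is the one that fails with "Filesystem closed" in the earlier traces). A minimal sketch of that recover-then-poll pattern using plain HDFS client calls; the path and sleep interval are placeholders, and this is not the actual RecoverLeaseFSUtils implementation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path oldWal = new Path("hdfs://localhost:8020/path/to/old.wal"); // placeholder path
        FileSystem fs = oldWal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // recoverLease() returns true if the file is already closed; otherwise the
        // NameNode starts block recovery (the "Lease recovery is in progress" WARN
        // below) and the caller polls isFileClosed() until recovery completes.
        boolean closed = dfs.recoverLease(oldWal);
        while (!closed) {
          Thread.sleep(1000); // back off between polls
          closed = dfs.isFileClosed(oldWal);
        }
      }
    }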
2024-12-09T03:26:07,699 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta
2024-12-09T03:26:07,699 WARN [IPC Server handler 4 on default port 33821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014
2024-12-09T03:26:07,699 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta after 0ms
2024-12-09T03:26:07,701 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35793:35793),(127.0.0.1/127.0.0.1:40991:40991)]
2024-12-09T03:26:07,701 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta is not closed yet, will try archiving it next time
2024-12-09T03:26:07,717 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/info/c03e5a5cfd4f4dee97f58acf69177878 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c./info:regioninfo/1733714743294/Put/seqid=0
2024-12-09T03:26:07,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741841_1025 (size=7125)
2024-12-09T03:26:07,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741841_1025 (size=7125)
2024-12-09T03:26:07,723 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/info/c03e5a5cfd4f4dee97f58acf69177878
2024-12-09T03:26:07,745 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/ns/41fa5bfa9b074a17b602a737bbd2712b is 43, key is default/ns:d/1733714742778/Put/seqid=0
2024-12-09T03:26:07,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741842_1026 (size=5153)
2024-12-09T03:26:07,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741842_1026 (size=5153)
2024-12-09T03:26:07,751 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/ns/41fa5bfa9b074a17b602a737bbd2712b
2024-12-09T03:26:07,772 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/table/cb9a9ad9ed634e8fa35d68d67062124c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733714743312/Put/seqid=0
2024-12-09T03:26:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741843_1027 (size=5438)
2024-12-09T03:26:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741843_1027 (size=5438)
2024-12-09T03:26:07,777 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/table/cb9a9ad9ed634e8fa35d68d67062124c
2024-12-09T03:26:07,785 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/info/c03e5a5cfd4f4dee97f58acf69177878 as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/info/c03e5a5cfd4f4dee97f58acf69177878
2024-12-09T03:26:07,791 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/info/c03e5a5cfd4f4dee97f58acf69177878, entries=10, sequenceid=11, filesize=7.0 K
2024-12-09T03:26:07,792 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/ns/41fa5bfa9b074a17b602a737bbd2712b as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/ns/41fa5bfa9b074a17b602a737bbd2712b
2024-12-09T03:26:07,798 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/ns/41fa5bfa9b074a17b602a737bbd2712b, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T03:26:07,799 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/.tmp/table/cb9a9ad9ed634e8fa35d68d67062124c as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/table/cb9a9ad9ed634e8fa35d68d67062124c
2024-12-09T03:26:07,805 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/table/cb9a9ad9ed634e8fa35d68d67062124c, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T03:26:07,807 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 117ms, sequenceid=11, compaction requested=false
2024-12-09T03:26:07,807 DEBUG [Time-limited test {}]
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T03:26:07,807 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7e0060bbc5fe43d1c98296f6cac5153c 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T03:26:07,808 ERROR [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:07,808 WARN [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9-prefix:1617b0b1421f,35529,1733714740753 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
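Alongside the failed-append traces, the test also flushes region 7e0060bbc5fe43d1c98296f6cac5153c (the "Flushing ... 1/1 column families" entry at 03:26:07,807 above; the .tmp HFile is committed and "Finished flush" is logged further below). In a client or test, an equivalent flush can be requested through the Admin API; a minimal sketch, assuming an open Connection, where only the table name is taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes every region of the table: the memstore is written to a .tmp HFile
          // and then committed into the column family directory, which is what the
          // HRegionFileSystem "Committing ... as ..." and HStore "Added ..." entries show.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
        }
      }
    }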
2024-12-09T03:26:07,808 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C35529%2C1733714740753:(num 1733714763649) roll requested 2024-12-09T03:26:07,809 INFO [regionserver/1617b0b1421f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C35529%2C1733714740753.1733714767808 2024-12-09T03:26:07,813 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 newFile=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714767808 2024-12-09T03:26:07,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:07,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:07,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:07,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:07,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:07,814 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714767808 2024-12-09T03:26:07,814 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:07,815 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1833210865-172.17.0.3-1733714738365:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:07,815 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:07,815 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 after 0ms 2024-12-09T03:26:07,816 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.1733714763649 to hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs/1617b0b1421f%2C35529%2C1733714740753.1733714763649 2024-12-09T03:26:07,820 DEBUG [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35793:35793),(127.0.0.1/127.0.0.1:40991:40991)] 2024-12-09T03:26:07,836 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/.tmp/info/d422ee3f951040eb8bc27ec27daf9dfe is 1080, key is row1002/info:/1733714752954/Put/seqid=0 2024-12-09T03:26:07,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741845_1029 (size=9270) 2024-12-09T03:26:07,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741845_1029 (size=9270) 2024-12-09T03:26:07,841 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/.tmp/info/d422ee3f951040eb8bc27ec27daf9dfe 2024-12-09T03:26:07,847 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/.tmp/info/d422ee3f951040eb8bc27ec27daf9dfe as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/info/d422ee3f951040eb8bc27ec27daf9dfe 2024-12-09T03:26:07,853 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/info/d422ee3f951040eb8bc27ec27daf9dfe, entries=4, sequenceid=8, filesize=9.1 K 2024-12-09T03:26:07,854 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 7e0060bbc5fe43d1c98296f6cac5153c in 47ms, sequenceid=8, compaction requested=false 2024-12-09T03:26:07,854 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7e0060bbc5fe43d1c98296f6cac5153c: 2024-12-09T03:26:07,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:26:07,859 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:26:07,859 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:26:07,859 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:07,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:07,860 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T03:26:07,860 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:26:07,860 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1126613160, stopped=false 2024-12-09T03:26:07,860 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,32973,1733714740564 2024-12-09T03:26:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:26:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:26:07,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:07,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:07,873 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:26:07,873 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:26:07,873 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:26:07,873 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:07,873 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:26:07,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:26:07,873 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,35529,1733714740753' ***** 2024-12-09T03:26:07,873 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:26:07,874 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(3091): Received CLOSE for 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,35529,1733714740753 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:35529. 2024-12-09T03:26:07,874 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7e0060bbc5fe43d1c98296f6cac5153c, disabling compactions & flushes 2024-12-09T03:26:07,874 DEBUG [RS:0;1617b0b1421f:35529 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:26:07,874 DEBUG [RS:0;1617b0b1421f:35529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:07,874 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:26:07,874 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:26:07,874 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. after waiting 0 ms 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:26:07,874 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:26:07,874 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:26:07,875 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T03:26:07,875 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7e0060bbc5fe43d1c98296f6cac5153c=TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c.} 2024-12-09T03:26:07,875 DEBUG [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7e0060bbc5fe43d1c98296f6cac5153c 2024-12-09T03:26:07,875 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:26:07,875 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:26:07,875 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:26:07,875 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:26:07,875 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:26:07,884 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/7e0060bbc5fe43d1c98296f6cac5153c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-09T03:26:07,885 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:26:07,885 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7e0060bbc5fe43d1c98296f6cac5153c: Waiting for close lock at 1733714767874Running coprocessor pre-close hooks at 1733714767874Disabling compacts and flushes for region at 1733714767874Disabling writes for close at 1733714767874Writing region close event to WAL at 1733714767877 (+3 ms)Running coprocessor post-close hooks at 1733714767885 (+8 ms)Closed at 1733714767885 2024-12-09T03:26:07,886 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733714742911.7e0060bbc5fe43d1c98296f6cac5153c. 2024-12-09T03:26:07,888 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:26:07,889 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:26:07,889 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:26:07,889 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714767875Running coprocessor pre-close hooks at 1733714767875Disabling compacts and flushes for region at 1733714767875Disabling writes for close at 1733714767875Writing region close event to WAL at 1733714767885 (+10 ms)Running coprocessor post-close hooks at 1733714767889 (+4 ms)Closed at 1733714767889 2024-12-09T03:26:07,889 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:26:08,075 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,35529,1733714740753; all regions closed. 
2024-12-09T03:26:08,076 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:08,076 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:08,076 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:08,076 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:08,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741840_1023 (size=825) 2024-12-09T03:26:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741840_1023 (size=825) 2024-12-09T03:26:08,150 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:26:08,150 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:26:08,150 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:26:08,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:08,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:09,128 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T03:26:09,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:09,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:10,542 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:26:10,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:10,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:11,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:11,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:11,701 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta after 4002ms 2024-12-09T03:26:11,702 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/WALs/1617b0b1421f,35529,1733714740753/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta to hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs/1617b0b1421f%2C35529%2C1733714740753.meta.1733714742645.meta 2024-12-09T03:26:11,709 DEBUG [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs 2024-12-09T03:26:11,710 INFO [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C35529%2C1733714740753.meta:.meta(num 1733714767692) 2024-12-09T03:26:11,711 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,711 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,711 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,711 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,711 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741844_1028 (size=1162) 2024-12-09T03:26:11,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741844_1028 (size=1162) 2024-12-09T03:26:11,721 DEBUG [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs 2024-12-09T03:26:11,721 INFO [RS:0;1617b0b1421f:35529 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C35529%2C1733714740753:(num 1733714767808) 2024-12-09T03:26:11,721 DEBUG [RS:0;1617b0b1421f:35529 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:11,721 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:26:11,721 INFO [RS:0;1617b0b1421f:35529 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:26:11,721 INFO [RS:0;1617b0b1421f:35529 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T03:26:11,721 INFO 
[RS:0;1617b0b1421f:35529 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:26:11,721 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:26:11,721 INFO [RS:0;1617b0b1421f:35529 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35529 2024-12-09T03:26:11,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:26:11,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,35529,1733714740753 2024-12-09T03:26:11,810 INFO [RS:0;1617b0b1421f:35529 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:26:11,820 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,35529,1733714740753] 2024-12-09T03:26:11,830 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,35529,1733714740753 already deleted, retry=false 2024-12-09T03:26:11,830 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,35529,1733714740753 expired; onlineServers=0 2024-12-09T03:26:11,830 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,32973,1733714740564' ***** 2024-12-09T03:26:11,830 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:26:11,830 INFO [M:0;1617b0b1421f:32973 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:26:11,830 INFO [M:0;1617b0b1421f:32973 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:26:11,830 DEBUG [M:0;1617b0b1421f:32973 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:26:11,831 DEBUG [M:0;1617b0b1421f:32973 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:26:11,831 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:26:11,831 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714742018 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714742018,5,FailOnTimeoutGroup] 2024-12-09T03:26:11,831 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714742018 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714742018,5,FailOnTimeoutGroup] 2024-12-09T03:26:11,831 INFO [M:0;1617b0b1421f:32973 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:26:11,831 INFO [M:0;1617b0b1421f:32973 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:26:11,831 DEBUG [M:0;1617b0b1421f:32973 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:26:11,831 INFO [M:0;1617b0b1421f:32973 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:26:11,832 INFO [M:0;1617b0b1421f:32973 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:26:11,832 INFO [M:0;1617b0b1421f:32973 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:26:11,832 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:26:11,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:26:11,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:11,841 DEBUG [M:0;1617b0b1421f:32973 {}] zookeeper.ZKUtil(347): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:26:11,841 WARN [M:0;1617b0b1421f:32973 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:26:11,842 INFO [M:0;1617b0b1421f:32973 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/.lastflushedseqids 2024-12-09T03:26:11,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741846_1030 (size=139) 2024-12-09T03:26:11,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741846_1030 (size=139) 2024-12-09T03:26:11,851 INFO [M:0;1617b0b1421f:32973 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:26:11,851 INFO [M:0;1617b0b1421f:32973 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:26:11,851 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:26:11,851 INFO [M:0;1617b0b1421f:32973 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:11,851 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:11,851 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:26:11,852 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:11,852 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-12-09T03:26:11,852 ERROR [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData-prefix:1617b0b1421f,32973,1733714740564 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:11,853 WARN [FSHLog-0-hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData-prefix:1617b0b1421f,32973,1733714740564 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:26:11,853 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 1617b0b1421f%2C32973%2C1733714740564:(num 1733714741756) roll requested 2024-12-09T03:26:11,853 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C32973%2C1733714740564.1733714771853 2024-12-09T03:26:11,859 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,859 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,860 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,860 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,860 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:11,860 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714771853 2024-12-09T03:26:11,860 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T03:26:11,861 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39969,DS-5afc482f-7096-4e90-89ad-18145eb98dfd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T03:26:11,861 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 2024-12-09T03:26:11,861 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40991:40991),(127.0.0.1/127.0.0.1:35793:35793)] 2024-12-09T03:26:11,861 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 is not closed yet, will try archiving it next time 2024-12-09T03:26:11,861 WARN [IPC Server handler 1 on default port 33821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-12-09T03:26:11,861 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 after 0ms 2024-12-09T03:26:11,879 DEBUG [M:0;1617b0b1421f:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/71a4f18727ce420b8bebacea20d49734 is 82, key is hbase:meta,,1/info:regioninfo/1733714742675/Put/seqid=0 2024-12-09T03:26:11,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741848_1033 (size=5672) 2024-12-09T03:26:11,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741848_1033 (size=5672) 2024-12-09T03:26:11,884 INFO [M:0;1617b0b1421f:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/71a4f18727ce420b8bebacea20d49734 2024-12-09T03:26:11,904 DEBUG [M:0;1617b0b1421f:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aca589e79bc64f24862ce1d51e818610 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733714743319/Put/seqid=0 2024-12-09T03:26:11,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741849_1034 (size=6117) 2024-12-09T03:26:11,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741849_1034 (size=6117) 2024-12-09T03:26:11,909 INFO [M:0;1617b0b1421f:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aca589e79bc64f24862ce1d51e818610 2024-12-09T03:26:11,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:26:11,920 INFO [RS:0;1617b0b1421f:35529 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:26:11,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35529-0x100089b8c070001, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:26:11,920 INFO [RS:0;1617b0b1421f:35529 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,35529,1733714740753; zookeeper connection closed. 2024-12-09T03:26:11,920 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75ce4b18 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75ce4b18 2024-12-09T03:26:11,921 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:26:11,927 DEBUG [M:0;1617b0b1421f:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a30e614456d4935a528492a4d1410de is 69, key is 1617b0b1421f,35529,1733714740753/rs:state/1733714742122/Put/seqid=0 2024-12-09T03:26:11,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741850_1035 (size=5156) 2024-12-09T03:26:11,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741850_1035 (size=5156) 2024-12-09T03:26:11,932 INFO [M:0;1617b0b1421f:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a30e614456d4935a528492a4d1410de 2024-12-09T03:26:11,949 DEBUG [M:0;1617b0b1421f:32973 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9030864b7ee4775bf37a4ff97b3e963 is 52, key is load_balancer_on/state:d/1733714742903/Put/seqid=0 2024-12-09T03:26:11,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741851_1036 (size=5056) 2024-12-09T03:26:11,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741851_1036 (size=5056) 2024-12-09T03:26:11,954 INFO [M:0;1617b0b1421f:32973 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9030864b7ee4775bf37a4ff97b3e963 2024-12-09T03:26:11,960 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/71a4f18727ce420b8bebacea20d49734 as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/71a4f18727ce420b8bebacea20d49734 2024-12-09T03:26:11,965 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/71a4f18727ce420b8bebacea20d49734, entries=8, sequenceid=56, filesize=5.5 K 2024-12-09T03:26:11,967 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aca589e79bc64f24862ce1d51e818610 as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aca589e79bc64f24862ce1d51e818610 2024-12-09T03:26:11,974 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aca589e79bc64f24862ce1d51e818610, entries=6, sequenceid=56, filesize=6.0 K 2024-12-09T03:26:11,985 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7a30e614456d4935a528492a4d1410de as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a30e614456d4935a528492a4d1410de 2024-12-09T03:26:11,992 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7a30e614456d4935a528492a4d1410de, entries=1, sequenceid=56, filesize=5.0 K 2024-12-09T03:26:11,993 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d9030864b7ee4775bf37a4ff97b3e963 as hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9030864b7ee4775bf37a4ff97b3e963 2024-12-09T03:26:12,000 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d9030864b7ee4775bf37a4ff97b3e963, entries=1, sequenceid=56, filesize=4.9 K 2024-12-09T03:26:12,001 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false 2024-12-09T03:26:12,002 INFO [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
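The "Committing .tmp/<file> as <store>/<file>" entries above reflect the usual HDFS write-then-rename commit pattern: flush output is first written under a temporary directory and then renamed into the store directory. A generic sketch of that pattern using the Hadoop FileSystem API (Java); the namenode URI and paths are placeholders, and this is not HBase's HRegionFileSystem code:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpFileSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder namenode URI and paths; in the log the store lives under
    // .../MasterData/data/master/store/<region>/.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
    Path tmp = new Path("/example/store/.tmp/flushfile");
    Path dst = new Path("/example/store/info/flushfile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("flushed cells would be written here");
    }
    // rename() is a metadata-only operation on HDFS, so the flushed file
    // appears in the store directory without copying any data blocks.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}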
2024-12-09T03:26:12,002 DEBUG [M:0;1617b0b1421f:32973 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714771851Disabling compacts and flushes for region at 1733714771851Disabling writes for close at 1733714771851Obtaining lock to block concurrent updates at 1733714771852 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714771852Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1733714771852Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733714771862 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714771862Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714771879 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714771879Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714771890 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714771904 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714771904Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714771914 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714771926 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714771926Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714771937 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714771949 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714771949Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@332464fd: reopening flushed file at 1733714771959 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50b14ad0: reopening flushed file at 1733714771966 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78b9b106: reopening flushed file at 1733714771974 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fd8ef87: reopening flushed file at 1733714771992 (+18 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=56, compaction requested=false at 1733714772001 (+9 ms)Writing region close event to WAL at 1733714772002 (+1 ms)Closed at 1733714772002 2024-12-09T03:26:12,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:12,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:12,003 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:12,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:12,003 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:12,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43799 is added to blk_1073741847_1031 (size=757) 2024-12-09T03:26:12,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44155 is added to blk_1073741847_1031 (size=757) 2024-12-09T03:26:12,129 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) 
Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T03:26:12,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:12,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:12,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,906 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:12,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:26:13,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:13,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:13,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:14,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:14,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:15,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:15,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:15,862 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 after 4001ms 2024-12-09T03:26:15,862 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/WALs/1617b0b1421f,32973,1733714740564/1617b0b1421f%2C32973%2C1733714740564.1733714741756 to hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/oldWALs/1617b0b1421f%2C32973%2C1733714740564.1733714741756 2024-12-09T03:26:15,865 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/MasterData/oldWALs/1617b0b1421f%2C32973%2C1733714740564.1733714741756 to hdfs://localhost:33821/user/jenkins/test-data/b6ded810-3cba-90d5-1198-2929a21007f9/oldWALs/1617b0b1421f%2C32973%2C1733714740564.1733714741756$masterlocalwal$ 2024-12-09T03:26:15,865 INFO [M:0;1617b0b1421f:32973 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T03:26:15,866 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
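The sequence above (attempt=0 fails immediately, attempt=1 succeeds after ~4s) is a lease-recovery polling loop. A rough approximation of that loop using only the public HDFS client calls recoverLease() and isFileClosed(); it is an illustrative sketch, not RecoverLeaseFSUtils itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    // args[0]: an hdfs:// path to the file whose lease should be recovered,
    // e.g. the old WAL referenced in the log lines above.
    Path file = new Path(args[0]);
    DistributedFileSystem dfs =
        (DistributedFileSystem) file.getFileSystem(new Configuration());
    long start = System.currentTimeMillis();
    int attempt = 0;
    // recoverLease() returns true once the NameNode has closed the file;
    // until then, keep polling and back off between attempts.
    while (!dfs.recoverLease(file) && !dfs.isFileClosed(file)) {
      System.out.printf("Failed to recover lease, attempt=%d after %dms%n",
          attempt++, System.currentTimeMillis() - start);
      Thread.sleep(4000L);
    }
    System.out.printf("Recovered lease, attempt=%d after %dms%n",
        attempt, System.currentTimeMillis() - start);
  }
}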
2024-12-09T03:26:15,866 INFO [M:0;1617b0b1421f:32973 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:32973 2024-12-09T03:26:15,866 INFO [M:0;1617b0b1421f:32973 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:26:16,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:26:16,014 INFO [M:0;1617b0b1421f:32973 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:26:16,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32973-0x100089b8c070000, quorum=127.0.0.1:50970, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:26:16,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6edb8d2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:16,053 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ab179b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:26:16,053 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:26:16,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5757dea9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:26:16,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ce3c2a8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:26:16,056 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:26:16,056 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:26:16,056 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 1cfda00a-8060-4140-9a0f-159f6b31e046) service to localhost/127.0.0.1:33821 2024-12-09T03:26:16,056 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:26:16,057 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data3/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:16,058 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data4/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:16,058 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:26:16,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@727fa385{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:16,061 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38762891{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:26:16,061 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:26:16,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16e84d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:26:16,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22ae57aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:26:16,063 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
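The datanode, web-app, ZooKeeper and minicluster shutdown entries in this stretch are the normal output of a JUnit teardown. A sketch of that lifecycle, assuming the HBaseTestingUtil class named in the log (the method names shown are the commonly used ones and may differ by version):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Starts MiniDFS, a MiniZK quorum and an HBase master plus region server.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Produces shutdown output like the entries above: region server and
    // master stop, datanode web apps and block pools stop, MiniZK stops,
    // and finally "Minicluster is down".
    TEST_UTIL.shutdownMiniCluster();
  }
}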
2024-12-09T03:26:16,063 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:26:16,063 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:26:16,063 WARN [BP-1833210865-172.17.0.3-1733714738365 heartbeating to localhost/127.0.0.1:33821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833210865-172.17.0.3-1733714738365 (Datanode Uuid 6a965f7b-9083-469a-be74-ef1f38191bc1) service to localhost/127.0.0.1:33821 2024-12-09T03:26:16,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data1/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:16,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/cluster_db792ace-edc8-a378-8c15-740ae8f24b79/data/data2/current/BP-1833210865-172.17.0.3-1733714738365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:26:16,064 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:26:16,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73211bd8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:26:16,069 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34714045{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:26:16,069 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:26:16,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@512c947f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:26:16,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cc9fb65{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir/,STOPPED} 2024-12-09T03:26:16,074 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:26:16,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:26:16,104 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33821 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:33821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=159 (was 175), ProcessCount=11 (was 11), AvailableMemoryMB=5747 (was 5675) - AvailableMemoryMB LEAK? 
- 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=159, ProcessCount=11, AvailableMemoryMB=5747 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.log.dir so I do NOT create it in target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b735fca0-f73d-f5d1-a881-a45f1dd5522f/hadoop.tmp.dir so I do NOT create it in target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac, deleteOnExit=true 2024-12-09T03:26:16,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/test.cache.data in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:26:16,111 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:26:16,111 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:26:16,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:26:16,123 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:26:16,478 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:16,482 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:26:16,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:26:16,483 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:26:16,483 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:26:16,483 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:16,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d9708a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:26:16,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e7fcc45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:26:16,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4182d22c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/java.io.tmpdir/jetty-localhost-36071-hadoop-hdfs-3_4_1-tests_jar-_-any-11289286497448788716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:26:16,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22d52882{HTTP/1.1, (http/1.1)}{localhost:36071} 2024-12-09T03:26:16,572 INFO [Time-limited test {}] server.Server(415): Started @196822ms 2024-12-09T03:26:16,582 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:26:16,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:16,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:16,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:26:16,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:26:16,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T03:26:16,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T03:26:16,818 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:16,822 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:26:16,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:26:16,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:26:16,823 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:26:16,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c1d88de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:26:16,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7807c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:26:16,912 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a70e9fd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/java.io.tmpdir/jetty-localhost-36167-hadoop-hdfs-3_4_1-tests_jar-_-any-1537194686978090106/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:16,912 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f54bfcd{HTTP/1.1, (http/1.1)}{localhost:36167} 2024-12-09T03:26:16,912 INFO [Time-limited test {}] server.Server(415): Started @197162ms 2024-12-09T03:26:16,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:26:16,942 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:26:16,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:26:16,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:26:16,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:26:16,945 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:26:16,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3272a875{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:26:16,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6966feef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:26:17,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ec0789c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/java.io.tmpdir/jetty-localhost-33315-hadoop-hdfs-3_4_1-tests_jar-_-any-33822087333595328/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:26:17,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60b4846b{HTTP/1.1, (http/1.1)}{localhost:33315} 2024-12-09T03:26:17,034 INFO [Time-limited test {}] server.Server(415): Started @197284ms 2024-12-09T03:26:17,035 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:26:17,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:17,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:18,111 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data1/current/BP-1654235275-172.17.0.3-1733714776134/current, will proceed with Du for space computation calculation, 2024-12-09T03:26:18,111 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data2/current/BP-1654235275-172.17.0.3-1733714776134/current, will proceed with Du for space computation calculation, 2024-12-09T03:26:18,132 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:26:18,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15b44d23fdf67a7c with lease ID 0x36f75f4f6399ee37: Processing first storage report for DS-7ae4e96f-bae9-44ea-bc76-a1299acc8763 from datanode DatanodeRegistration(127.0.0.1:39965, datanodeUuid=c0893080-d240-410a-9a64-cad7dfd78c32, infoPort=33153, infoSecurePort=0, ipcPort=42201, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134) 2024-12-09T03:26:18,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15b44d23fdf67a7c with lease ID 0x36f75f4f6399ee37: from storage DS-7ae4e96f-bae9-44ea-bc76-a1299acc8763 node DatanodeRegistration(127.0.0.1:39965, datanodeUuid=c0893080-d240-410a-9a64-cad7dfd78c32, infoPort=33153, infoSecurePort=0, ipcPort=42201, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:18,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15b44d23fdf67a7c with lease ID 0x36f75f4f6399ee37: Processing first storage report for DS-5e678d96-2b5c-436b-857b-e0b3ba72e439 from datanode DatanodeRegistration(127.0.0.1:39965, datanodeUuid=c0893080-d240-410a-9a64-cad7dfd78c32, infoPort=33153, infoSecurePort=0, ipcPort=42201, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134) 2024-12-09T03:26:18,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15b44d23fdf67a7c with lease ID 0x36f75f4f6399ee37: from storage DS-5e678d96-2b5c-436b-857b-e0b3ba72e439 node DatanodeRegistration(127.0.0.1:39965, datanodeUuid=c0893080-d240-410a-9a64-cad7dfd78c32, infoPort=33153, infoSecurePort=0, ipcPort=42201, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T03:26:18,267 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data3/current/BP-1654235275-172.17.0.3-1733714776134/current, will proceed with Du for space computation calculation, 2024-12-09T03:26:18,267 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data4/current/BP-1654235275-172.17.0.3-1733714776134/current, will proceed with Du for space computation calculation, 2024-12-09T03:26:18,286 WARN [Thread-1641 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:26:18,288 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb787d18bb96b1d84 with lease ID 0x36f75f4f6399ee38: Processing first storage report for DS-ac6f6e19-f425-49c9-9040-2517dea3628e from datanode DatanodeRegistration(127.0.0.1:44297, datanodeUuid=36ca2ec1-260c-4ccd-a6b4-4f2f8f4dcc8d, infoPort=37403, infoSecurePort=0, ipcPort=36547, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134) 2024-12-09T03:26:18,288 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb787d18bb96b1d84 with lease ID 0x36f75f4f6399ee38: from storage DS-ac6f6e19-f425-49c9-9040-2517dea3628e node DatanodeRegistration(127.0.0.1:44297, datanodeUuid=36ca2ec1-260c-4ccd-a6b4-4f2f8f4dcc8d, infoPort=37403, infoSecurePort=0, ipcPort=36547, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:18,288 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb787d18bb96b1d84 with lease ID 0x36f75f4f6399ee38: Processing first storage report for DS-a30cfc7c-13ed-4e12-b937-da10ce057b79 from datanode DatanodeRegistration(127.0.0.1:44297, datanodeUuid=36ca2ec1-260c-4ccd-a6b4-4f2f8f4dcc8d, infoPort=37403, infoSecurePort=0, ipcPort=36547, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134) 2024-12-09T03:26:18,288 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb787d18bb96b1d84 with lease ID 0x36f75f4f6399ee38: from storage DS-a30cfc7c-13ed-4e12-b937-da10ce057b79 node DatanodeRegistration(127.0.0.1:44297, datanodeUuid=36ca2ec1-260c-4ccd-a6b4-4f2f8f4dcc8d, infoPort=37403, infoSecurePort=0, ipcPort=36547, storageInfo=lv=-57;cid=testClusterID;nsid=970173676;c=1733714776134), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:26:18,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661 2024-12-09T03:26:18,376 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/zookeeper_0, clientPort=63767, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:26:18,378 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63767 2024-12-09T03:26:18,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:26:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:26:18,389 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079 with version=8 2024-12-09T03:26:18,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:26:18,390 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:26:18,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,390 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:26:18,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,390 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:26:18,391 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:26:18,391 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:26:18,392 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38361 2024-12-09T03:26:18,392 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38361 connecting to ZooKeeper ensemble=127.0.0.1:63767 2024-12-09T03:26:18,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:383610x0, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-09T03:26:18,463 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38361-0x100089c1fcd0000 connected 2024-12-09T03:26:18,546 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,554 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:26:18,554 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079, hbase.cluster.distributed=false 2024-12-09T03:26:18,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:26:18,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-09T03:26:18,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38361 2024-12-09T03:26:18,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38361 2024-12-09T03:26:18,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-09T03:26:18,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38361 2024-12-09T03:26:18,571 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:26:18,571 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:26:18,571 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:26:18,572 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33155 2024-12-09T03:26:18,573 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33155 connecting to ZooKeeper ensemble=127.0.0.1:63767 2024-12-09T03:26:18,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,575 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331550x0, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:26:18,588 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331550x0, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:26:18,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33155-0x100089c1fcd0001 connected 2024-12-09T03:26:18,588 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:26:18,589 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:26:18,589 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:26:18,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:26:18,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33155 2024-12-09T03:26:18,591 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33155 2024-12-09T03:26:18,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33155 2024-12-09T03:26:18,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33155 2024-12-09T03:26:18,592 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33155 2024-12-09T03:26:18,604 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:38361 2024-12-09T03:26:18,604 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,38361,1733714778390 2024-12-09T03:26:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:26:18,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:26:18,609 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1617b0b1421f,38361,1733714778390 2024-12-09T03:26:18,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:26:18,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,619 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:26:18,620 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,38361,1733714778390 from backup master directory 2024-12-09T03:26:18,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:26:18,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,38361,1733714778390 2024-12-09T03:26:18,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:26:18,630 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
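The startup records above (DFS and datanode web servers, MiniZooKeeperCluster, the master's Netty RPC server, and active-master registration under /hbase/backup-masters) are what the test harness drives when it starts a mini cluster with the option logged earlier: 1 master, 1 region server, 2 datanodes, 1 ZooKeeper server. Purely as a hedged illustration of that request shape — class and builder names are taken from the log's own identifiers (HBaseTestingUtil, StartMiniClusterOption) and are assumed, not verified against this exact 3.0.0-beta-2 snapshot — a test might ask for the same topology like this:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Test utility that owns the temporary DFS, ZooKeeper and HBase directories
    // (the target/test-data paths seen in the log).
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Same topology as the logged StartMiniClusterOption:
    // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // brings up DFS, ZK and HBase as in the records above
    try {
      // ... test body would run against the cluster here ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down and removes the temp dirs
    }
  }
}
```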
2024-12-09T03:26:18,630 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,38361,1733714778390 2024-12-09T03:26:18,635 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/hbase.id] with ID: 294e92ba-c2df-4ebd-b342-38ffdcac07e6 2024-12-09T03:26:18,636 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/.tmp/hbase.id 2024-12-09T03:26:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:26:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:26:18,645 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/.tmp/hbase.id]:[hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/hbase.id] 2024-12-09T03:26:18,661 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:18,661 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:26:18,663 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-12-09T03:26:18,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:18,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:18,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:26:18,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:26:18,684 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:26:18,685 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:26:18,685 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:26:18,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:26:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:26:18,694 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store 2024-12-09T03:26:18,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:26:18,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:26:18,701 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:26:18,701 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:26:18,701 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714778701Disabling compacts and flushes for region at 1733714778701Disabling writes for close at 1733714778701Writing region close event to WAL at 1733714778701Closed at 1733714778701 2024-12-09T03:26:18,702 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/.initializing 2024-12-09T03:26:18,702 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/WALs/1617b0b1421f,38361,1733714778390 2024-12-09T03:26:18,704 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C38361%2C1733714778390, suffix=, logDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/WALs/1617b0b1421f,38361,1733714778390, archiveDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/oldWALs, maxLogs=10 2024-12-09T03:26:18,704 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C38361%2C1733714778390.1733714778704 2024-12-09T03:26:18,709 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/WALs/1617b0b1421f,38361,1733714778390/1617b0b1421f%2C38361%2C1733714778390.1733714778704 2024-12-09T03:26:18,711 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-09T03:26:18,712 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:26:18,712 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:18,712 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,712 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:26:18,715 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:18,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:18,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:26:18,717 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:18,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:26:18,717 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:26:18,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:18,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:26:18,719 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:26:18,720 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:18,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:26:18,721 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,721 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,722 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,724 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:26:18,726 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:26:18,728 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:26:18,729 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804309, jitterRate=0.022732526063919067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:26:18,730 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714778712Initializing all the Stores at 1733714778713 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714778713Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714778713Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714778713Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714778713Cleaning up temporary data from old regions at 1733714778723 (+10 ms)Region opened successfully at 1733714778730 (+7 ms) 2024-12-09T03:26:18,730 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:26:18,733 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@646003b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:26:18,734 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:26:18,735 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:26:18,735 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:26:18,735 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:26:18,735 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:26:18,736 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:26:18,736 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:26:18,739 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:26:18,740 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:26:18,745 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:26:18,746 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:26:18,747 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:26:18,756 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:26:18,757 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:26:18,759 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:26:18,766 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:26:18,768 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:26:18,777 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:26:18,781 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:26:18,788 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:26:18,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:26:18,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:26:18,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,798 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,38361,1733714778390, sessionid=0x100089c1fcd0000, setting cluster-up flag (Was=false) 2024-12-09T03:26:18,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:18,930 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:26:18,934 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,38361,1733714778390 2024-12-09T03:26:19,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:19,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:19,082 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:26:19,086 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,38361,1733714778390 2024-12-09T03:26:19,089 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:26:19,093 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:26:19,093 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:26:19,093 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:26:19,094 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,38361,1733714778390 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:26:19,094 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(746): ClusterId : 294e92ba-c2df-4ebd-b342-38ffdcac07e6 2024-12-09T03:26:19,094 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:26:19,104 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:26:19,104 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:26:19,104 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:26:19,104 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714809105 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:26:19,105 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:26:19,105 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:26:19,106 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:26:19,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714779106,5,FailOnTimeoutGroup] 2024-12-09T03:26:19,106 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714779106,5,FailOnTimeoutGroup] 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,106 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T03:26:19,107 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,107 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:26:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:26:19,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:26:19,114 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:26:19,114 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:26:19,114 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079 2024-12-09T03:26:19,115 DEBUG [RS:0;1617b0b1421f:33155 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e44ccd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:26:19,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:26:19,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:26:19,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:19,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:26:19,123 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:26:19,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:26:19,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:26:19,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:26:19,126 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:26:19,126 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:26:19,127 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:33155 2024-12-09T03:26:19,127 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:26:19,127 INFO 
[RS:0;1617b0b1421f:33155 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:26:19,127 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T03:26:19,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:26:19,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,127 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,38361,1733714778390 with port=33155, startcode=1733714778570 2024-12-09T03:26:19,128 DEBUG [RS:0;1617b0b1421f:33155 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:26:19,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:26:19,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740 2024-12-09T03:26:19,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740 2024-12-09T03:26:19,129 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49859, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:26:19,130 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38361 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,130 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38361 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:26:19,130 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:26:19,131 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T03:26:19,131 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079 2024-12-09T03:26:19,131 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39429 2024-12-09T03:26:19,131 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:26:19,132 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:26:19,133 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:26:19,134 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716489, jitterRate=-0.08893807232379913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:26:19,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714779121Initializing all the Stores at 1733714779122 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779122Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779122Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714779122Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779122Cleaning up temporary data from old regions at 1733714779130 (+8 ms)Region opened successfully at 1733714779134 (+4 ms) 2024-12-09T03:26:19,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:26:19,134 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:26:19,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:26:19,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-12-09T03:26:19,134 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:26:19,135 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:26:19,135 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714779134Disabling compacts and flushes for region at 1733714779134Disabling writes for close at 1733714779134Writing region close event to WAL at 1733714779135 (+1 ms)Closed at 1733714779135 2024-12-09T03:26:19,136 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:26:19,136 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:26:19,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:26:19,137 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:26:19,139 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:26:19,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:26:19,144 DEBUG [RS:0;1617b0b1421f:33155 {}] zookeeper.ZKUtil(111): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,144 WARN [RS:0;1617b0b1421f:33155 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T03:26:19,144 INFO [RS:0;1617b0b1421f:33155 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:26:19,144 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,33155,1733714778570] 2024-12-09T03:26:19,148 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:26:19,149 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:26:19,149 INFO [RS:0;1617b0b1421f:33155 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:26:19,149 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,150 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:26:19,150 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:26:19,150 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,151 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:26:19,152 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:26:19,152 DEBUG [RS:0;1617b0b1421f:33155 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,152 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,33155,1733714778570-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:26:19,170 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:26:19,170 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,33155,1733714778570-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,171 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,171 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.Replication(171): 1617b0b1421f,33155,1733714778570 started 2024-12-09T03:26:19,181 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
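The ScheduledChore registrations above (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, BrokenStoreFileCleaner, and so on) are periodic tasks run by HBase's internal ChoreService. The following is a minimal sketch of how such a chore is defined and scheduled, assuming the internal ScheduledChore/ChoreService constructors shown; this is non-public (IA.Private) API, and the names and period are illustrative rather than taken from this log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Stoppable owner used to cancel the chore; trivial implementation for the sketch.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Periodic task, comparable in shape to the chores enabled in the log above.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000 /* ms */) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };

        // ChoreService runs the chore on its own scheduled thread pool.
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);

        Thread.sleep(3_000);
        service.shutdown();
      }
    }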
2024-12-09T03:26:19,181 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,33155,1733714778570, RpcServer on 1617b0b1421f/172.17.0.3:33155, sessionid=0x100089c1fcd0001 2024-12-09T03:26:19,181 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:26:19,181 DEBUG [RS:0;1617b0b1421f:33155 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,182 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,33155,1733714778570' 2024-12-09T03:26:19,182 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:26:19,182 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,33155,1733714778570' 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:26:19,183 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:26:19,184 DEBUG [RS:0;1617b0b1421f:33155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:26:19,184 INFO [RS:0;1617b0b1421f:33155 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:26:19,184 INFO [RS:0;1617b0b1421f:33155 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:26:19,288 INFO [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C33155%2C1733714778570, suffix=, logDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570, archiveDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs, maxLogs=32 2024-12-09T03:26:19,289 INFO [RS:0;1617b0b1421f:33155 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33155%2C1733714778570.1733714779288 2024-12-09T03:26:19,289 WARN [1617b0b1421f:38361 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
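The WAL configuration entry above reports blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the new FSHLog. The roll size is normally derived from the WAL block size and a roll multiplier. Below is a small sketch of reading those values from a client Configuration, assuming the property names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs; the fallback defaults simply mirror the numbers in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSizeSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Assumed property names; defaults mirror the values in the log above
        // (blocksize=256 MB, rollsize=128 MB => multiplier 0.5, maxLogs=32).
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);

        long rollSize = (long) (blockSize * multiplier);
        System.out.printf("blocksize=%d, rollsize=%d, maxLogs=%d%n", blockSize, rollSize, maxLogs);
      }
    }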
2024-12-09T03:26:19,298 INFO [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714779288 2024-12-09T03:26:19,299 DEBUG [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-09T03:26:19,539 DEBUG [1617b0b1421f:38361 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:26:19,540 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,544 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,33155,1733714778570, state=OPENING 2024-12-09T03:26:19,609 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:26:19,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:19,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:26:19,621 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:26:19,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:26:19,621 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:26:19,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,33155,1733714778570}] 2024-12-09T03:26:19,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:19,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:19,777 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:26:19,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49693, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:26:19,786 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:26:19,786 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:26:19,789 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C33155%2C1733714778570.meta, suffix=.meta, logDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570, archiveDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs, maxLogs=32 2024-12-09T03:26:19,789 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33155%2C1733714778570.meta.1733714779789.meta 2024-12-09T03:26:19,797 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.meta.1733714779789.meta 2024-12-09T03:26:19,799 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-09T03:26:19,800 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:26:19,800 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:26:19,800 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:26:19,801 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
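At this point two WAL files exist under WALs/1617b0b1421f,33155,1733714778570 on the test HDFS: the region server's default WAL and the hbase:meta WAL with the .meta suffix. A sketch of listing that directory with the standard Hadoop FileSystem API follows; the NameNode address and path are copied from the log lines above and are specific to this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListWals {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address and WAL directory taken from the log entries above.
        Path walDir = new Path("hdfs://localhost:39429/user/jenkins/test-data/"
            + "491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570");
        try (FileSystem fs = FileSystem.get(walDir.toUri(), conf)) {
          for (FileStatus status : fs.listStatus(walDir)) {
            System.out.println(status.getPath() + " " + status.getLen() + " bytes");
          }
        }
      }
    }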
2024-12-09T03:26:19,801 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:26:19,801 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:19,801 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:26:19,801 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:26:19,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:26:19,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:26:19,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:26:19,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:26:19,806 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,806 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:26:19,807 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:26:19,807 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:26:19,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:26:19,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:26:19,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,808 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
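The StoreOpener entries above enumerate the four hbase:meta column families (info, ns, rep_barrier and table), each with ROW_INDEX_V1 encoding, a DefaultMemStore and the DefaultStoreFileTracker. A hedged client-side sketch of reading the same column-family descriptors through the Admin API; the connection setup is illustrative, not part of the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaFamilies {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Fetch the hbase:meta table descriptor and print each column family's settings.
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.printf("%s: encoding=%s, blocksize=%d, inMemory=%s%n",
                cf.getNameAsString(), cf.getDataBlockEncoding(), cf.getBlocksize(), cf.isInMemory());
          }
        }
      }
    }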
2024-12-09T03:26:19,808 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:26:19,809 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740 2024-12-09T03:26:19,810 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740 2024-12-09T03:26:19,811 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:26:19,811 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:26:19,811 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:26:19,813 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:26:19,814 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824202, jitterRate=0.048028022050857544}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:26:19,814 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:26:19,814 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714779801Writing region info on filesystem at 1733714779801Initializing all the Stores at 1733714779802 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779803 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779803Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714779803Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714779803Cleaning up temporary data from old regions at 1733714779811 (+8 ms)Running coprocessor post-open hooks at 1733714779814 (+3 ms)Region opened successfully at 1733714779814 2024-12-09T03:26:19,815 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714779776 2024-12-09T03:26:19,817 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:26:19,817 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:26:19,819 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,819 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,33155,1733714778570, state=OPEN 2024-12-09T03:26:19,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:26:19,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:26:19,857 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:19,857 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:26:19,858 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:26:19,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:26:19,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,33155,1733714778570 in 236 msec 2024-12-09T03:26:19,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:26:19,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 727 msec 2024-12-09T03:26:19,867 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:26:19,867 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:26:19,869 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:26:19,869 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,33155,1733714778570, seqNum=-1] 2024-12-09T03:26:19,869 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:26:19,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33701, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:26:19,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 784 msec 2024-12-09T03:26:19,876 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714779876, completionTime=-1 2024-12-09T03:26:19,877 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:26:19,877 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714839879 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714899879 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,879 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:38361, period=300000, unit=MILLISECONDS is enabled. 
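The "fetched meta region location" entries above show the master resolving hbase:meta,,1.1588230740 to 1617b0b1421f,33155,1733714778570 through the connection registry. A client can perform the equivalent lookup with a RegionLocator, as in this sketch (connection details are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocation {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Locate the (single) hbase:meta region by its empty start key.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }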
2024-12-09T03:26:19,880 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,880 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:26:19,881 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.253sec 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:26:19,883 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:26:19,885 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:26:19,885 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:26:19,885 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38361,1733714778390-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
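With "Master has completed initialization" logged and the post-startup chores enabled, the mini-cluster is serving requests. A short sketch of verifying that state from a client with Admin.getClusterMetrics(); for this run the output would reflect one live region server and the active master reported above. The setup is illustrative only.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheck {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
          System.out.println("regions in transition: " + metrics.getRegionStatesInTransition().size());
        }
      }
    }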
2024-12-09T03:26:19,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19ceb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:26:19,895 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,38361,-1 for getting cluster id 2024-12-09T03:26:19,895 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:26:19,897 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '294e92ba-c2df-4ebd-b342-38ffdcac07e6' 2024-12-09T03:26:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:26:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "294e92ba-c2df-4ebd-b342-38ffdcac07e6" 2024-12-09T03:26:19,898 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66106cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:26:19,898 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,38361,-1] 2024-12-09T03:26:19,898 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:26:19,898 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:26:19,899 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:26:19,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60cbbc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:26:19,901 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:26:19,902 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,33155,1733714778570, seqNum=-1] 2024-12-09T03:26:19,902 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:26:19,903 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41962, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:26:19,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,38361,1733714778390 2024-12-09T03:26:19,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:26:19,908 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:26:19,909 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:26:19,910 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 1617b0b1421f,38361,1733714778390 2024-12-09T03:26:19,910 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1222f315 2024-12-09T03:26:19,910 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:26:19,911 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49206, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:26:19,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T03:26:19,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T03:26:19,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:26:19,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:26:19,915 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:26:19,915 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:19,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-09T03:26:19,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:26:19,916 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:26:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741835_1011 (size=405) 2024-12-09T03:26:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741835_1011 (size=405) 2024-12-09T03:26:19,924 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 842d81cb695330dd41438202d17ee84c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079 2024-12-09T03:26:19,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741836_1012 (size=88) 2024-12-09T03:26:19,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741836_1012 (size=88) 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 842d81cb695330dd41438202d17ee84c, disabling compactions & flushes 2024-12-09T03:26:19,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. after waiting 0 ms 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 
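The CreateTableProcedure running here (pid=4) was triggered by the client request logged just before it: create 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, VERSIONS => '1', and deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) settings, which is why TableDescriptorChecker warned about over-splitting and very frequent flushing. A hedged sketch of an equivalent client-side table creation follows; the descriptor values are read off the log, everything else is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            // Values below match the descriptor and warnings in the log: a 768 KB max
            // file size and an 8 KB memstore flush size force frequent flushes and
            // splits, which is what a log-rolling test wants.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .build())
            .build();

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc);
        }
      }
    }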
2024-12-09T03:26:19,931 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:19,931 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 842d81cb695330dd41438202d17ee84c: Waiting for close lock at 1733714779931Disabling compacts and flushes for region at 1733714779931Disabling writes for close at 1733714779931Writing region close event to WAL at 1733714779931Closed at 1733714779931 2024-12-09T03:26:19,933 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:26:19,933 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733714779933"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714779933"}]},"ts":"1733714779933"} 2024-12-09T03:26:19,935 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T03:26:19,936 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:26:19,936 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714779936"}]},"ts":"1733714779936"} 2024-12-09T03:26:19,939 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-09T03:26:19,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=842d81cb695330dd41438202d17ee84c, ASSIGN}] 2024-12-09T03:26:19,940 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=842d81cb695330dd41438202d17ee84c, ASSIGN 2024-12-09T03:26:19,941 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=842d81cb695330dd41438202d17ee84c, ASSIGN; state=OFFLINE, location=1617b0b1421f,33155,1733714778570; forceNewPlan=false, retain=false 2024-12-09T03:26:20,092 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=842d81cb695330dd41438202d17ee84c, regionState=OPENING, regionLocation=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:20,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=842d81cb695330dd41438202d17ee84c, ASSIGN because future has completed 2024-12-09T03:26:20,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 842d81cb695330dd41438202d17ee84c, server=1617b0b1421f,33155,1733714778570}] 2024-12-09T03:26:20,258 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:20,258 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 842d81cb695330dd41438202d17ee84c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:26:20,259 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,259 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:26:20,259 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,259 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,262 INFO [StoreOpener-842d81cb695330dd41438202d17ee84c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,265 INFO [StoreOpener-842d81cb695330dd41438202d17ee84c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 842d81cb695330dd41438202d17ee84c columnFamilyName info 2024-12-09T03:26:20,265 DEBUG [StoreOpener-842d81cb695330dd41438202d17ee84c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:26:20,266 INFO [StoreOpener-842d81cb695330dd41438202d17ee84c-1 {}] regionserver.HStore(327): Store=842d81cb695330dd41438202d17ee84c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:26:20,266 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,267 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,267 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,268 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,268 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,269 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,271 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:26:20,272 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 842d81cb695330dd41438202d17ee84c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856421, jitterRate=0.08899612724781036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:26:20,272 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:26:20,272 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 842d81cb695330dd41438202d17ee84c: Running coprocessor pre-open hook at 1733714780259Writing region info on filesystem at 1733714780259Initializing all the Stores at 1733714780261 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714780261Cleaning up temporary data from old regions at 1733714780268 (+7 ms)Running coprocessor post-open hooks at 1733714780272 (+4 ms)Region opened successfully at 1733714780272 2024-12-09T03:26:20,273 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c., pid=6, masterSystemTime=1733714780251 2024-12-09T03:26:20,275 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:20,275 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:20,276 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=842d81cb695330dd41438202d17ee84c, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,33155,1733714778570 2024-12-09T03:26:20,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 842d81cb695330dd41438202d17ee84c, server=1617b0b1421f,33155,1733714778570 because future has completed 2024-12-09T03:26:20,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:26:20,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 842d81cb695330dd41438202d17ee84c, server=1617b0b1421f,33155,1733714778570 in 182 msec 2024-12-09T03:26:20,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:26:20,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=842d81cb695330dd41438202d17ee84c, ASSIGN in 342 msec 2024-12-09T03:26:20,286 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:26:20,286 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714780286"}]},"ts":"1733714780286"} 2024-12-09T03:26:20,288 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-09T03:26:20,289 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:26:20,291 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, 
hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 377 msec 2024-12-09T03:26:20,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:20,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:21,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:21,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:22,250 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:26:22,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:26:22,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:22,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:23,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:23,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:24,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:24,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:25,148 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:26:25,150 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T03:26:25,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:25,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:26,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:26,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:26,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T03:26:26,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T03:26:26,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:26:26,746 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T03:26:26,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T03:26:26,747 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T03:26:26,747 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:26:26,748 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T03:26:27,678 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:27,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:28,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:28,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:29,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:29,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T03:26:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T03:26:29,963 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T03:26:29,964 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-09T03:26:29,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T03:26:29,970 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.
2024-12-09T03:26:29,975 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c., hostname=1617b0b1421f,33155,1733714778570, seqNum=2]
2024-12-09T03:26:29,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T03:26:29,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T03:26:29,989 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T03:26:29,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-09T03:26:29,991 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T03:26:29,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T03:26:30,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-09T03:26:30,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.
2024-12-09T03:26:30,160 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 842d81cb695330dd41438202d17ee84c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T03:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/89f767f5c4f943dfaff99437470b6dda is 1080, key is row0001/info:/1733714789976/Put/seqid=0
2024-12-09T03:26:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741837_1013 (size=6033)
2024-12-09T03:26:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741837_1013 (size=6033)
2024-12-09T03:26:30,178 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/89f767f5c4f943dfaff99437470b6dda
2024-12-09T03:26:30,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/89f767f5c4f943dfaff99437470b6dda as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda
2024-12-09T03:26:30,192 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda, entries=1, sequenceid=5, filesize=5.9 K
2024-12-09T03:26:30,193 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 33ms, sequenceid=5, compaction requested=false
2024-12-09T03:26:30,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 842d81cb695330dd41438202d17ee84c:
2024-12-09T03:26:30,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.
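The flush logged above was requested by the test client over RPC ("Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling"), which the master turned into FlushTableProcedure pid=7 and its FlushRegionProcedure subprocedure pid=8. A minimal, hypothetical sketch of issuing the same request through the HBase Admin API follows; the connection setup and class name are illustrative assumptions, only the table name is taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative client configuration; the test harness wires up its own mini-cluster config.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a FlushTableProcedure on the master (pid=7 above), which fans out
      // one FlushRegionProcedure per region (pid=8 above) and waits for completion.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}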
2024-12-09T03:26:30,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T03:26:30,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T03:26:30,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T03:26:30,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 205 msec 2024-12-09T03:26:30,202 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 216 msec 2024-12-09T03:26:30,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T03:26:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-09T03:26:40,003 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T03:26:40,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T03:26:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T03:26:40,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T03:26:40,015 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T03:26:40,017 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T03:26:40,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T03:26:40,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-09T03:26:40,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.
2024-12-09T03:26:40,172 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 842d81cb695330dd41438202d17ee84c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T03:26:40,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/2d6a36502eae461abdf19f00c8fe97e7 is 1080, key is row0002/info:/1733714800005/Put/seqid=0
2024-12-09T03:26:40,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741838_1014 (size=6033)
2024-12-09T03:26:40,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741838_1014 (size=6033)
2024-12-09T03:26:40,188 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/2d6a36502eae461abdf19f00c8fe97e7
2024-12-09T03:26:40,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/2d6a36502eae461abdf19f00c8fe97e7 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7
2024-12-09T03:26:40,203 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7, entries=1, sequenceid=9, filesize=5.9 K
2024-12-09T03:26:40,204 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 33ms, sequenceid=9, compaction requested=false
2024-12-09T03:26:40,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 842d81cb695330dd41438202d17ee84c:
2024-12-09T03:26:40,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.
2024-12-09T03:26:40,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-09T03:26:40,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-09T03:26:40,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T03:26:40,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec 2024-12-09T03:26:40,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 200 msec 2024-12-09T03:26:40,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:40,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 after 68066ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T03:26:40,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:40,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta after 68052ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
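[editor's note] The repeated WARN entries from RecoverLeaseFSUtils above come from a retry loop: the WAL closer keeps asking the NameNode to recover the lease on the old WAL file and polling whether it is closed, and every attempt fails with "Filesystem closed" because the underlying DFSClient has already been shut down. The following is a minimal sketch of that retry pattern under stated assumptions; it is not the HBase RecoverLeaseFSUtils implementation, and the NameNode address, WAL path, and timings are placeholders.

```java
// Minimal sketch of a lease-recovery retry loop: call recoverLease(), then poll
// isFileClosed() until it reports true. If the client behind the FileSystem handle
// is closed, every call throws IOException("Filesystem closed"), matching the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

import java.io.IOException;
import java.net.URI;

public class LeaseRecoverySketch {
    static boolean recoverWithRetries(DistributedFileSystem dfs, Path wal,
                                      int maxAttempts, long pauseMs) throws InterruptedException {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                // recoverLease() returns true if the file is already closed.
                if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
                    return true;
                }
            } catch (IOException e) {
                // A closed client surfaces as "Filesystem closed"; log and retry,
                // which is what produces the repeated WARN entries in this section.
                System.err.println("attempt=" + attempt + " on file=" + wal + " failed: " + e.getMessage());
            }
            Thread.sleep(pauseMs);
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode URI; in the log the old WALs live under hdfs://localhost:33407.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33407"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path wal = new Path("/user/jenkins/test-data/example-wal"); // placeholder WAL path
        System.out.println("lease recovered: " + recoverWithRetries(dfs, wal, 5, 1000L));
    }
}
```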
2024-12-09T03:26:41,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:41,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:42,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:42,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:43,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:43,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:44,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:44,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:45,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:45,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:46,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:46,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:26:47,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:47,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:48,368 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:26:48,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:48,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:49,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:49,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:50,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-09T03:26:50,092 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T03:26:50,095 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33155%2C1733714778570.1733714810095 2024-12-09T03:26:50,101 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:50,101 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:50,101 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:50,101 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:50,101 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:26:50,102 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714779288 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714810095 2024-12-09T03:26:50,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741833_1009 (size=5546) 2024-12-09T03:26:50,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741833_1009 (size=5546) 2024-12-09T03:26:50,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37403:37403),(127.0.0.1/127.0.0.1:33153:33153)] 2024-12-09T03:26:50,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:26:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:26:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-09T03:26:50,113 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T03:26:50,114 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T03:26:50,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T03:26:50,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-09T03:26:50,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:50,268 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 842d81cb695330dd41438202d17ee84c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T03:26:50,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/607f456f31394a1295f0626302cc0119 is 1080, key is row0003/info:/1733714810093/Put/seqid=0 2024-12-09T03:26:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741840_1016 (size=6033) 2024-12-09T03:26:50,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741840_1016 (size=6033) 2024-12-09T03:26:50,679 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/607f456f31394a1295f0626302cc0119 2024-12-09T03:26:50,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/607f456f31394a1295f0626302cc0119 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119 2024-12-09T03:26:50,692 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119, entries=1, sequenceid=13, filesize=5.9 K 2024-12-09T03:26:50,693 INFO 
[RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 425ms, sequenceid=13, compaction requested=true 2024-12-09T03:26:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 842d81cb695330dd41438202d17ee84c: 2024-12-09T03:26:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:26:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-09T03:26:50,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-09T03:26:50,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-09T03:26:50,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 581 msec 2024-12-09T03:26:50,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:50,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 589 msec 2024-12-09T03:26:50,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:51,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:51,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:52,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:52,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:53,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:53,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:54,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:54,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:55,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:55,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:56,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:56,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:57,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:57,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:58,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:58,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:59,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:26:59,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-09T03:27:00,122 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T03:27:00,122 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:27:00,123 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:27:00,123 DEBUG [Time-limited test {}] regionserver.HStore(1541): 842d81cb695330dd41438202d17ee84c/info is initiating minor compaction (all files) 2024-12-09T03:27:00,123 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:27:00,123 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:00,123 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 842d81cb695330dd41438202d17ee84c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:27:00,123 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119] into tmpdir=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp, totalSize=17.7 K 2024-12-09T03:27:00,124 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 89f767f5c4f943dfaff99437470b6dda, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733714789976 2024-12-09T03:27:00,124 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2d6a36502eae461abdf19f00c8fe97e7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733714800005 2024-12-09T03:27:00,125 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 607f456f31394a1295f0626302cc0119, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733714810093 2024-12-09T03:27:00,140 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 842d81cb695330dd41438202d17ee84c#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:00,140 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/11fb062c24bd4152801b24df24fba4a2 is 1080, key is row0001/info:/1733714789976/Put/seqid=0 2024-12-09T03:27:00,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741841_1017 (size=8296) 2024-12-09T03:27:00,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741841_1017 (size=8296) 2024-12-09T03:27:00,152 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/11fb062c24bd4152801b24df24fba4a2 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/11fb062c24bd4152801b24df24fba4a2 2024-12-09T03:27:00,160 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 842d81cb695330dd41438202d17ee84c/info of 842d81cb695330dd41438202d17ee84c into 11fb062c24bd4152801b24df24fba4a2(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:00,160 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 842d81cb695330dd41438202d17ee84c: 2024-12-09T03:27:00,163 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33155%2C1733714778570.1733714820162 2024-12-09T03:27:00,168 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:00,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:00,169 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:00,169 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:00,169 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:00,169 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714810095 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714820162 2024-12-09T03:27:00,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741839_1015 (size=2520) 2024-12-09T03:27:00,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741839_1015 (size=2520) 2024-12-09T03:27:00,178 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714779288 to 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs/1617b0b1421f%2C33155%2C1733714778570.1733714779288 2024-12-09T03:27:00,178 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:37403:37403)] 2024-12-09T03:27:00,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:27:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:27:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T03:27:00,182 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T03:27:00,183 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T03:27:00,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T03:27:00,335 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T03:27:00,335 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T03:27:00,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-09T03:27:00,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 
2024-12-09T03:27:00,336 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 842d81cb695330dd41438202d17ee84c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T03:27:00,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/f661d54043a9427ba98866416589f9e5 is 1080, key is row0000/info:/1733714820161/Put/seqid=0 2024-12-09T03:27:00,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741843_1019 (size=6033) 2024-12-09T03:27:00,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741843_1019 (size=6033) 2024-12-09T03:27:00,351 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/f661d54043a9427ba98866416589f9e5 2024-12-09T03:27:00,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/f661d54043a9427ba98866416589f9e5 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/f661d54043a9427ba98866416589f9e5 2024-12-09T03:27:00,363 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/f661d54043a9427ba98866416589f9e5, entries=1, sequenceid=18, filesize=5.9 K 2024-12-09T03:27:00,365 INFO [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 29ms, sequenceid=18, compaction requested=false 2024-12-09T03:27:00,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 842d81cb695330dd41438202d17ee84c: 2024-12-09T03:27:00,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 
2024-12-09T03:27:00,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-09T03:27:00,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-09T03:27:00,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-09T03:27:00,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-12-09T03:27:00,372 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-12-09T03:27:00,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:00,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:01,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:01,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:02,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:02,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:03,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:03,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:04,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:04,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:05,259 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 842d81cb695330dd41438202d17ee84c, had cached 0 bytes from a total of 14329 2024-12-09T03:27:05,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:05,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:06,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:06,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:07,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:07,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:08,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:08,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:09,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:09,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:10,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38361 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T03:27:10,192 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T03:27:10,196 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C33155%2C1733714778570.1733714830196 2024-12-09T03:27:10,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,204 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,204 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,204 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,204 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,204 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714820162 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714830196 2024-12-09T03:27:10,205 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33153:33153),(127.0.0.1/127.0.0.1:37403:37403)] 2024-12-09T03:27:10,205 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714820162 is not closed yet, will try archiving it next time 2024-12-09T03:27:10,205 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714810095 to hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs/1617b0b1421f%2C33155%2C1733714778570.1733714810095 2024-12-09T03:27:10,205 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:27:10,206 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:27:10,206 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:27:10,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:10,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:10,206 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-09T03:27:10,206 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:27:10,206 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=72512465, stopped=false 2024-12-09T03:27:10,206 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,38361,1733714778390 2024-12-09T03:27:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741842_1018 (size=2026) 2024-12-09T03:27:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741842_1018 (size=2026) 2024-12-09T03:27:10,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:27:10,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:27:10,268 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:27:10,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:10,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:10,268 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:27:10,268 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:27:10,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:10,268 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,33155,1733714778570' ***** 2024-12-09T03:27:10,268 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:27:10,268 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:27:10,269 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(3091): Received CLOSE for 842d81cb695330dd41438202d17ee84c 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,33155,1733714778570 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:33155. 2024-12-09T03:27:10,269 DEBUG [RS:0;1617b0b1421f:33155 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:27:10,269 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 842d81cb695330dd41438202d17ee84c, disabling compactions & flushes 2024-12-09T03:27:10,269 DEBUG [RS:0;1617b0b1421f:33155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:10,269 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 
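The call stack logged above shows where this shutdown originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which walks down through SingleProcessHBaseCluster and JVMClusterUtil. A hedged sketch of that teardown shape follows; the field name TEST_UTIL and the class name are assumptions for illustration, not the test's actual source.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public abstract class LogRollingTeardownSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the single master and region server, then the mini DFS and ZK
    // clusters underneath them, producing the STOPPING/STOPPED entries in this log.
    TEST_UTIL.shutdownMiniCluster();
  }
}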
2024-12-09T03:27:10,269 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:27:10,269 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. after waiting 0 ms 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:27:10,269 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:27:10,269 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 842d81cb695330dd41438202d17ee84c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T03:27:10,269 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T03:27:10,269 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1325): Online Regions={842d81cb695330dd41438202d17ee84c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:27:10,269 DEBUG [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 842d81cb695330dd41438202d17ee84c 2024-12-09T03:27:10,269 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:27:10,270 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:27:10,270 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:27:10,270 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:27:10,270 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:27:10,270 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-09T03:27:10,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:27:10,273 DEBUG 
[RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/478cd6a107ab4a918786be6422a3439d is 1080, key is row0001/info:/1733714830194/Put/seqid=0 2024-12-09T03:27:10,273 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:27:10,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741845_1021 (size=6033) 2024-12-09T03:27:10,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741845_1021 (size=6033) 2024-12-09T03:27:10,282 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/478cd6a107ab4a918786be6422a3439d 2024-12-09T03:27:10,284 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/info/d205f78e9d5a462f9a894ee8c5b0c43e is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c./info:regioninfo/1733714780276/Put/seqid=0 2024-12-09T03:27:10,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741846_1022 (size=7308) 2024-12-09T03:27:10,289 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/.tmp/info/478cd6a107ab4a918786be6422a3439d as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/478cd6a107ab4a918786be6422a3439d 2024-12-09T03:27:10,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741846_1022 (size=7308) 2024-12-09T03:27:10,290 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/info/d205f78e9d5a462f9a894ee8c5b0c43e 2024-12-09T03:27:10,295 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/478cd6a107ab4a918786be6422a3439d, entries=1, sequenceid=22, filesize=5.9 K 2024-12-09T03:27:10,296 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 27ms, sequenceid=22, compaction requested=true 2024-12-09T03:27:10,297 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119] to archive 2024-12-09T03:27:10,298 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:27:10,299 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda to hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/89f767f5c4f943dfaff99437470b6dda 2024-12-09T03:27:10,300 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7 to hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/2d6a36502eae461abdf19f00c8fe97e7 2024-12-09T03:27:10,301 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119 to 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/info/607f456f31394a1295f0626302cc0119 2024-12-09T03:27:10,302 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1617b0b1421f:38361 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-09T03:27:10,302 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [89f767f5c4f943dfaff99437470b6dda=6033, 2d6a36502eae461abdf19f00c8fe97e7=6033, 607f456f31394a1295f0626302cc0119=6033] 2024-12-09T03:27:10,306 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/842d81cb695330dd41438202d17ee84c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-09T03:27:10,307 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:27:10,307 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 842d81cb695330dd41438202d17ee84c: Waiting for close lock at 1733714830269Running coprocessor pre-close hooks at 1733714830269Disabling compacts and flushes for region at 1733714830269Disabling writes for close at 1733714830269Obtaining lock to block concurrent updates at 1733714830269Preparing flush snapshotting stores in 842d81cb695330dd41438202d17ee84c at 1733714830269Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733714830270 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 
at 1733714830270Flushing 842d81cb695330dd41438202d17ee84c/info: creating writer at 1733714830270Flushing 842d81cb695330dd41438202d17ee84c/info: appending metadata at 1733714830272 (+2 ms)Flushing 842d81cb695330dd41438202d17ee84c/info: closing flushed file at 1733714830272Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2da0479c: reopening flushed file at 1733714830288 (+16 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 842d81cb695330dd41438202d17ee84c in 27ms, sequenceid=22, compaction requested=true at 1733714830297 (+9 ms)Writing region close event to WAL at 1733714830303 (+6 ms)Running coprocessor post-close hooks at 1733714830306 (+3 ms)Closed at 1733714830306 2024-12-09T03:27:10,307 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733714779912.842d81cb695330dd41438202d17ee84c. 2024-12-09T03:27:10,313 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/ns/7b890c4c7cb44bdfba57a9f03256b650 is 43, key is default/ns:d/1733714779871/Put/seqid=0 2024-12-09T03:27:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741847_1023 (size=5153) 2024-12-09T03:27:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741847_1023 (size=5153) 2024-12-09T03:27:10,318 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/ns/7b890c4c7cb44bdfba57a9f03256b650 2024-12-09T03:27:10,338 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/table/5316541e380e41a3ab9d540ef1b15240 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733714780286/Put/seqid=0 2024-12-09T03:27:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741848_1024 (size=5508) 2024-12-09T03:27:10,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741848_1024 (size=5508) 2024-12-09T03:27:10,343 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/table/5316541e380e41a3ab9d540ef1b15240 2024-12-09T03:27:10,349 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/info/d205f78e9d5a462f9a894ee8c5b0c43e as 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/info/d205f78e9d5a462f9a894ee8c5b0c43e 2024-12-09T03:27:10,355 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/info/d205f78e9d5a462f9a894ee8c5b0c43e, entries=10, sequenceid=11, filesize=7.1 K 2024-12-09T03:27:10,356 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/ns/7b890c4c7cb44bdfba57a9f03256b650 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/ns/7b890c4c7cb44bdfba57a9f03256b650 2024-12-09T03:27:10,362 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/ns/7b890c4c7cb44bdfba57a9f03256b650, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T03:27:10,363 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/.tmp/table/5316541e380e41a3ab9d540ef1b15240 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/table/5316541e380e41a3ab9d540ef1b15240 2024-12-09T03:27:10,369 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/table/5316541e380e41a3ab9d540ef1b15240, entries=2, sequenceid=11, filesize=5.4 K 2024-12-09T03:27:10,370 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-12-09T03:27:10,377 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T03:27:10,378 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:27:10,378 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:27:10,378 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714830269Running coprocessor pre-close hooks at 1733714830269Disabling compacts and flushes for region at 1733714830269Disabling writes for close at 1733714830270 (+1 ms)Obtaining lock to block concurrent updates at 1733714830270Preparing flush snapshotting stores in 1588230740 at 
1733714830270Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733714830270Flushing stores of hbase:meta,,1.1588230740 at 1733714830270Flushing 1588230740/info: creating writer at 1733714830270Flushing 1588230740/info: appending metadata at 1733714830284 (+14 ms)Flushing 1588230740/info: closing flushed file at 1733714830284Flushing 1588230740/ns: creating writer at 1733714830295 (+11 ms)Flushing 1588230740/ns: appending metadata at 1733714830313 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1733714830313Flushing 1588230740/table: creating writer at 1733714830323 (+10 ms)Flushing 1588230740/table: appending metadata at 1733714830337 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733714830337Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4031ac34: reopening flushed file at 1733714830348 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74ffe6b5: reopening flushed file at 1733714830356 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@676476c9: reopening flushed file at 1733714830363 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false at 1733714830370 (+7 ms)Writing region close event to WAL at 1733714830374 (+4 ms)Running coprocessor post-close hooks at 1733714830378 (+4 ms)Closed at 1733714830378 2024-12-09T03:27:10,378 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:27:10,470 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,33155,1733714778570; all regions closed. 
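The close sequence above (flush to a .tmp HFile, commit into the store, archive the previously compacted files, write recovered.edits/NN.seqid, close) is the server side of ordinary flush and compaction traffic. As an illustrative sketch only, with an assumed connection setup, the client calls that generate this kind of activity look roughly like this; Admin.flush writes the memstore out as a new HFile and a later compaction makes the older files eligible for the HFileArchiver moves seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);        // one new HFile per column family holding data
      admin.majorCompact(table); // asynchronous; rewrites files, old ones become "compacted away"
    }
  }
}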
2024-12-09T03:27:10,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,470 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,470 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,470 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,470 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741834_1010 (size=3306) 2024-12-09T03:27:10,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741834_1010 (size=3306) 2024-12-09T03:27:10,475 DEBUG [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs 2024-12-09T03:27:10,475 INFO [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C33155%2C1733714778570.meta:.meta(num 1733714779789) 2024-12-09T03:27:10,475 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,475 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,475 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741844_1020 (size=1252) 2024-12-09T03:27:10,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741844_1020 (size=1252) 2024-12-09T03:27:10,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/WALs/1617b0b1421f,33155,1733714778570/1617b0b1421f%2C33155%2C1733714778570.1733714820162 to hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs/1617b0b1421f%2C33155%2C1733714778570.1733714820162 2024-12-09T03:27:10,611 DEBUG [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/oldWALs 2024-12-09T03:27:10,611 INFO [RS:0;1617b0b1421f:33155 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C33155%2C1733714778570:(num 1733714830196) 2024-12-09T03:27:10,611 DEBUG [RS:0;1617b0b1421f:33155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:10,611 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:27:10,611 INFO [RS:0;1617b0b1421f:33155 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:27:10,611 INFO [RS:0;1617b0b1421f:33155 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:27:10,611 INFO [RS:0;1617b0b1421f:33155 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:27:10,611 INFO 
[regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:27:10,612 INFO [RS:0;1617b0b1421f:33155 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33155 2024-12-09T03:27:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:27:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,33155,1733714778570 2024-12-09T03:27:10,699 INFO [RS:0;1617b0b1421f:33155 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:27:10,700 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,33155,1733714778570] 2024-12-09T03:27:10,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:10,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:10,720 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,33155,1733714778570 already deleted, retry=false 2024-12-09T03:27:10,720 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,33155,1733714778570 expired; onlineServers=0 2024-12-09T03:27:10,720 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,38361,1733714778390' ***** 2024-12-09T03:27:10,720 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:27:10,720 INFO [M:0;1617b0b1421f:38361 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:27:10,720 INFO [M:0;1617b0b1421f:38361 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:27:10,721 DEBUG [M:0;1617b0b1421f:38361 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:27:10,721 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
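The two WARN stack traces above come from RecoverLeaseFSUtils probing, via reflection, whether an old WAL file's lease is already released. The warning is benign in this teardown: the DFSClient for that earlier test-data directory was already closed, so the probe fails with "Filesystem closed" and the close path simply moves on. A hedged sketch of the same probe written directly against the HDFS client API (class and method names below are illustrative, not the HBase utility itself):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class WalLeaseProbeSketch {
  static boolean isWalClosed(FileSystem fs, Path wal) {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    try {
      // isFileClosed lives on DistributedFileSystem, not FileSystem, which is
      // why the real utility reaches it reflectively.
      return ((DistributedFileSystem) fs).isFileClosed(wal);
    } catch (IOException e) {
      // e.g. "Filesystem closed" when the client was shut down first, as logged above
      return false;
    }
  }
}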
2024-12-09T03:27:10,721 DEBUG [M:0;1617b0b1421f:38361 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:27:10,721 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714779106 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714779106,5,FailOnTimeoutGroup] 2024-12-09T03:27:10,721 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714779106 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714779106,5,FailOnTimeoutGroup] 2024-12-09T03:27:10,721 INFO [M:0;1617b0b1421f:38361 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:27:10,721 INFO [M:0;1617b0b1421f:38361 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:27:10,721 DEBUG [M:0;1617b0b1421f:38361 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:27:10,721 INFO [M:0;1617b0b1421f:38361 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:27:10,722 INFO [M:0;1617b0b1421f:38361 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:27:10,722 INFO [M:0;1617b0b1421f:38361 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:27:10,722 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:27:10,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:27:10,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:10,731 DEBUG [M:0;1617b0b1421f:38361 {}] zookeeper.ZKUtil(347): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:27:10,731 WARN [M:0;1617b0b1421f:38361 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:27:10,731 INFO [M:0;1617b0b1421f:38361 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/.lastflushedseqids 2024-12-09T03:27:10,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741849_1025 (size=130) 2024-12-09T03:27:10,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741849_1025 (size=130) 2024-12-09T03:27:10,737 INFO [M:0;1617b0b1421f:38361 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:27:10,737 INFO [M:0;1617b0b1421f:38361 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:27:10,737 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:27:10,737 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:10,737 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:10,737 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:27:10,737 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:10,737 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-12-09T03:27:10,753 DEBUG [M:0;1617b0b1421f:38361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49dee49cdab44153a0eb1810d1dbf2ed is 82, key is hbase:meta,,1/info:regioninfo/1733714779818/Put/seqid=0 2024-12-09T03:27:10,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741850_1026 (size=5672) 2024-12-09T03:27:10,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741850_1026 (size=5672) 2024-12-09T03:27:10,757 INFO [M:0;1617b0b1421f:38361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49dee49cdab44153a0eb1810d1dbf2ed 2024-12-09T03:27:10,780 DEBUG [M:0;1617b0b1421f:38361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e008d2d8e4834d9fb91611e2be356dfa is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733714780291/Put/seqid=0 2024-12-09T03:27:10,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741851_1027 (size=7818) 2024-12-09T03:27:10,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741851_1027 (size=7818) 2024-12-09T03:27:10,785 INFO [M:0;1617b0b1421f:38361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e008d2d8e4834d9fb91611e2be356dfa 2024-12-09T03:27:10,790 INFO [M:0;1617b0b1421f:38361 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e008d2d8e4834d9fb91611e2be356dfa 2024-12-09T03:27:10,803 DEBUG [M:0;1617b0b1421f:38361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c73e26c21574386ae883bd0d6e63d36 is 69, key is 1617b0b1421f,33155,1733714778570/rs:state/1733714779130/Put/seqid=0 2024-12-09T03:27:10,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741852_1028 (size=5156) 2024-12-09T03:27:10,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741852_1028 (size=5156) 2024-12-09T03:27:10,808 INFO [M:0;1617b0b1421f:38361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c73e26c21574386ae883bd0d6e63d36 2024-12-09T03:27:10,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:27:10,810 INFO [RS:0;1617b0b1421f:33155 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:27:10,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33155-0x100089c1fcd0001, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:27:10,810 INFO [RS:0;1617b0b1421f:33155 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,33155,1733714778570; zookeeper connection closed. 2024-12-09T03:27:10,810 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@138126e0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@138126e0 2024-12-09T03:27:10,810 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:27:10,826 DEBUG [M:0;1617b0b1421f:38361 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e87374d5f3a041faba89a023496b3401 is 52, key is load_balancer_on/state:d/1733714779907/Put/seqid=0 2024-12-09T03:27:10,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741853_1029 (size=5056) 2024-12-09T03:27:10,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741853_1029 (size=5056) 2024-12-09T03:27:10,835 INFO [M:0;1617b0b1421f:38361 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e87374d5f3a041faba89a023496b3401 2024-12-09T03:27:10,840 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/49dee49cdab44153a0eb1810d1dbf2ed as 
hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49dee49cdab44153a0eb1810d1dbf2ed 2024-12-09T03:27:10,845 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/49dee49cdab44153a0eb1810d1dbf2ed, entries=8, sequenceid=121, filesize=5.5 K 2024-12-09T03:27:10,846 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e008d2d8e4834d9fb91611e2be356dfa as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e008d2d8e4834d9fb91611e2be356dfa 2024-12-09T03:27:10,851 INFO [M:0;1617b0b1421f:38361 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e008d2d8e4834d9fb91611e2be356dfa 2024-12-09T03:27:10,851 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e008d2d8e4834d9fb91611e2be356dfa, entries=14, sequenceid=121, filesize=7.6 K 2024-12-09T03:27:10,852 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c73e26c21574386ae883bd0d6e63d36 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6c73e26c21574386ae883bd0d6e63d36 2024-12-09T03:27:10,857 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6c73e26c21574386ae883bd0d6e63d36, entries=1, sequenceid=121, filesize=5.0 K 2024-12-09T03:27:10,858 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e87374d5f3a041faba89a023496b3401 as hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e87374d5f3a041faba89a023496b3401 2024-12-09T03:27:10,864 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39429/user/jenkins/test-data/491302e6-8468-437b-2000-753e7b68b079/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e87374d5f3a041faba89a023496b3401, entries=1, sequenceid=121, filesize=4.9 K 2024-12-09T03:27:10,865 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false 2024-12-09T03:27:10,866 INFO [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
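The repeated "Committing .tmp/<family>/<file> as <family>/<file>" entries above reflect a write-then-rename commit: flush output is written under the region's .tmp directory and only renamed into the store directory once it is fully written, so readers never observe a partial HFile. A minimal sketch of that pattern with the plain Hadoop FileSystem API follows; the paths and class name are placeholders, not the actual region filesystem code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void commit(Configuration conf, Path tmpFile, Path finalFile) throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    // A single-file rename is atomic on HDFS, which is what makes this a safe commit step.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + finalFile);
    }
  }
}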
2024-12-09T03:27:10,867 DEBUG [M:0;1617b0b1421f:38361 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714830737Disabling compacts and flushes for region at 1733714830737Disabling writes for close at 1733714830737Obtaining lock to block concurrent updates at 1733714830737Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714830737Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1733714830738 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733714830738Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714830738Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714830752 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714830752Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714830762 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714830780 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714830780Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714830790 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714830803 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714830803Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714830812 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714830825 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714830825Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fe15f71: reopening flushed file at 1733714830839 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fb1441c: reopening flushed file at 1733714830846 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48a5d6f3: reopening flushed file at 1733714830851 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fcea9df: reopening flushed file at 1733714830857 (+6 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=121, compaction requested=false at 1733714830865 (+8 ms)Writing region close event to WAL at 1733714830866 (+1 ms)Closed at 1733714830866 2024-12-09T03:27:10,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,867 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,867 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,867 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,867 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:27:10,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39965 is added to blk_1073741830_1006 (size=52987) 2024-12-09T03:27:10,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44297 is added to blk_1073741830_1006 (size=52987) 2024-12-09T03:27:10,870 INFO [M:0;1617b0b1421f:38361 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T03:27:10,870 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:27:10,870 INFO [M:0;1617b0b1421f:38361 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38361 2024-12-09T03:27:10,870 INFO [M:0;1617b0b1421f:38361 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:27:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:27:10,978 INFO [M:0;1617b0b1421f:38361 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:27:10,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38361-0x100089c1fcd0000, quorum=127.0.0.1:63767, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:27:10,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ec0789c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:27:10,982 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60b4846b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:27:10,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:27:10,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6966feef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:27:10,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3272a875{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,STOPPED} 2024-12-09T03:27:10,985 WARN [BP-1654235275-172.17.0.3-1733714776134 heartbeating to localhost/127.0.0.1:39429 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:27:10,985 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:27:10,985 WARN [BP-1654235275-172.17.0.3-1733714776134 heartbeating to localhost/127.0.0.1:39429 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1654235275-172.17.0.3-1733714776134 (Datanode Uuid 36ca2ec1-260c-4ccd-a6b4-4f2f8f4dcc8d) service to localhost/127.0.0.1:39429 2024-12-09T03:27:10,985 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:27:10,986 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data3/current/BP-1654235275-172.17.0.3-1733714776134 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:27:10,986 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data4/current/BP-1654235275-172.17.0.3-1733714776134 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:27:10,986 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:27:10,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a70e9fd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:27:10,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f54bfcd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:27:10,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:27:10,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7807c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:27:10,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c1d88de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,STOPPED} 2024-12-09T03:27:10,990 WARN [BP-1654235275-172.17.0.3-1733714776134 heartbeating to localhost/127.0.0.1:39429 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:27:10,990 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:27:10,990 WARN [BP-1654235275-172.17.0.3-1733714776134 heartbeating to localhost/127.0.0.1:39429 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1654235275-172.17.0.3-1733714776134 (Datanode Uuid c0893080-d240-410a-9a64-cad7dfd78c32) service to localhost/127.0.0.1:39429 2024-12-09T03:27:10,990 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:27:10,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data1/current/BP-1654235275-172.17.0.3-1733714776134 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:27:10,991 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/cluster_a78e8962-5a7c-ad20-5df4-9be396b484ac/data/data2/current/BP-1654235275-172.17.0.3-1733714776134 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:27:10,991 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:27:10,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4182d22c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:27:10,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22d52882{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:27:10,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:27:10,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e7fcc45{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:27:10,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d9708a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir/,STOPPED} 2024-12-09T03:27:11,004 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:27:11,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:27:11,031 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/1617b0b1421f:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39429 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39429 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39429 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39429 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39429 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39429 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39429 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39429 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Client (874278612) connection to localhost/127.0.0.1:39429 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=162 (was 159) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5194 (was 5747) 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=162, ProcessCount=11, AvailableMemoryMB=5194 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.log.dir so I do NOT create it in target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc3028c-7644-9be1-51a1-490dfc7a2661/hadoop.tmp.dir so I do NOT create it in target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda, deleteOnExit=true 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:27:11,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/test.cache.data in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:27:11,040 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:27:11,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:27:11,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:27:11,055 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:27:11,155 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:27:11,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:27:11,427 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:27:11,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:27:11,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:27:11,435 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:27:11,436 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:27:11,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43d8bd5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:27:11,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d4e3343{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:27:11,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fbe59a1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/java.io.tmpdir/jetty-localhost-39261-hadoop-hdfs-3_4_1-tests_jar-_-any-17581856439869134850/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:27:11,573 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5712bc9d{HTTP/1.1, (http/1.1)}{localhost:39261} 2024-12-09T03:27:11,573 INFO [Time-limited test {}] server.Server(415): Started @251823ms 2024-12-09T03:27:11,590 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:27:11,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:11,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:11,888 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:27:11,892 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:27:11,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:27:11,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:27:11,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:27:11,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5284eb61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:27:11,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@256a17a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:27:12,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1102e2d6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/java.io.tmpdir/jetty-localhost-42361-hadoop-hdfs-3_4_1-tests_jar-_-any-3930118420943423517/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:27:12,027 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cb72b8{HTTP/1.1, (http/1.1)}{localhost:42361} 2024-12-09T03:27:12,027 INFO [Time-limited test {}] server.Server(415): Started @252277ms 2024-12-09T03:27:12,028 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:27:12,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:27:12,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:27:12,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:27:12,085 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:27:12,085 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:27:12,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dce6fab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:27:12,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63b7be59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:27:12,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e87fea8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/java.io.tmpdir/jetty-localhost-36315-hadoop-hdfs-3_4_1-tests_jar-_-any-1056260203508218551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:27:12,204 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@604cd81b{HTTP/1.1, (http/1.1)}{localhost:36315} 2024-12-09T03:27:12,204 INFO [Time-limited test {}] server.Server(415): Started @252455ms 2024-12-09T03:27:12,205 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:27:12,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:12,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:13,263 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data2/current/BP-1650772398-172.17.0.3-1733714831059/current, will proceed with Du for space computation calculation, 2024-12-09T03:27:13,263 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data1/current/BP-1650772398-172.17.0.3-1733714831059/current, will proceed with Du for space computation calculation, 2024-12-09T03:27:13,283 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:27:13,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6afc67cdecf3dcb1 with lease ID 0x43465c28aba4bda3: Processing first storage report for DS-fefb5221-8b48-4cd6-8b0d-611f020f91e8 from datanode DatanodeRegistration(127.0.0.1:45573, datanodeUuid=daf18ddc-37d6-458f-bb9e-e23f6bbaa620, infoPort=35319, infoSecurePort=0, ipcPort=46795, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059) 2024-12-09T03:27:13,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6afc67cdecf3dcb1 with lease ID 0x43465c28aba4bda3: from storage DS-fefb5221-8b48-4cd6-8b0d-611f020f91e8 node DatanodeRegistration(127.0.0.1:45573, datanodeUuid=daf18ddc-37d6-458f-bb9e-e23f6bbaa620, infoPort=35319, infoSecurePort=0, ipcPort=46795, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:27:13,285 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6afc67cdecf3dcb1 with lease ID 0x43465c28aba4bda3: Processing first storage report for DS-ffe280c2-eae5-4dc6-a522-ad4d9f5cbe42 from datanode DatanodeRegistration(127.0.0.1:45573, datanodeUuid=daf18ddc-37d6-458f-bb9e-e23f6bbaa620, infoPort=35319, infoSecurePort=0, ipcPort=46795, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059) 2024-12-09T03:27:13,285 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6afc67cdecf3dcb1 with lease ID 0x43465c28aba4bda3: from storage DS-ffe280c2-eae5-4dc6-a522-ad4d9f5cbe42 node DatanodeRegistration(127.0.0.1:45573, datanodeUuid=daf18ddc-37d6-458f-bb9e-e23f6bbaa620, infoPort=35319, infoSecurePort=0, ipcPort=46795, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:27:13,392 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data3/current/BP-1650772398-172.17.0.3-1733714831059/current, will proceed with Du for space computation calculation, 2024-12-09T03:27:13,393 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data4/current/BP-1650772398-172.17.0.3-1733714831059/current, will proceed with Du for space computation calculation, 2024-12-09T03:27:13,409 WARN [Thread-1958 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:27:13,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e610b1a1fb034c5 with lease ID 0x43465c28aba4bda4: Processing first storage report for DS-c93028ae-04d9-40cd-9647-a7944485975e from datanode DatanodeRegistration(127.0.0.1:33847, datanodeUuid=9a026953-f60e-43b6-8ee1-5585f52ca9da, infoPort=35443, infoSecurePort=0, ipcPort=40341, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059) 2024-12-09T03:27:13,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e610b1a1fb034c5 with lease ID 0x43465c28aba4bda4: from storage DS-c93028ae-04d9-40cd-9647-a7944485975e node DatanodeRegistration(127.0.0.1:33847, datanodeUuid=9a026953-f60e-43b6-8ee1-5585f52ca9da, infoPort=35443, infoSecurePort=0, ipcPort=40341, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:27:13,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e610b1a1fb034c5 with lease ID 0x43465c28aba4bda4: Processing first storage report for DS-7968ff76-67f4-4684-a8d7-71f2734b40db from datanode DatanodeRegistration(127.0.0.1:33847, datanodeUuid=9a026953-f60e-43b6-8ee1-5585f52ca9da, infoPort=35443, infoSecurePort=0, ipcPort=40341, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059) 2024-12-09T03:27:13,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e610b1a1fb034c5 with lease ID 0x43465c28aba4bda4: from storage DS-7968ff76-67f4-4684-a8d7-71f2734b40db node DatanodeRegistration(127.0.0.1:33847, datanodeUuid=9a026953-f60e-43b6-8ee1-5585f52ca9da, infoPort=35443, infoSecurePort=0, ipcPort=40341, storageInfo=lv=-57;cid=testClusterID;nsid=1079185753;c=1733714831059), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:27:13,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879 2024-12-09T03:27:13,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/zookeeper_0, clientPort=61967, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:27:13,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61967 2024-12-09T03:27:13,466 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:27:13,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:27:13,475 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115 with version=8 2024-12-09T03:27:13,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:27:13,478 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:27:13,478 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:27:13,479 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38129 2024-12-09T03:27:13,480 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38129 connecting to ZooKeeper ensemble=127.0.0.1:61967 2024-12-09T03:27:13,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:381290x0, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-09T03:27:13,557 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38129-0x100089cf6ff0000 connected 2024-12-09T03:27:13,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,636 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,642 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:27:13,643 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115, hbase.cluster.distributed=false 2024-12-09T03:27:13,644 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:27:13,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38129 2024-12-09T03:27:13,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38129 2024-12-09T03:27:13,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38129 2024-12-09T03:27:13,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38129 2024-12-09T03:27:13,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38129 2024-12-09T03:27:13,683 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:27:13,683 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:27:13,683 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:27:13,685 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44043 2024-12-09T03:27:13,687 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44043 connecting to ZooKeeper ensemble=127.0.0.1:61967 2024-12-09T03:27:13,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,690 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,708 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440430x0, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:27:13,708 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44043-0x100089cf6ff0001 connected 2024-12-09T03:27:13,708 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:27:13,709 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:27:13,713 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:27:13,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:27:13,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:27:13,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:13,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44043 2024-12-09T03:27:13,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44043 2024-12-09T03:27:13,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44043 2024-12-09T03:27:13,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:13,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44043 2024-12-09T03:27:13,721 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44043 2024-12-09T03:27:13,732 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:38129 2024-12-09T03:27:13,737 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,38129,1733714833477 2024-12-09T03:27:13,749 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:27:13,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:27:13,750 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1617b0b1421f,38129,1733714833477 2024-12-09T03:27:13,761 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:27:13,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:13,761 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:13,761 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:27:13,761 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,38129,1733714833477 from backup master directory 2024-12-09T03:27:13,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,38129,1733714833477 2024-12-09T03:27:13,771 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:27:13,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:27:13,771 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:27:13,771 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,38129,1733714833477 2024-12-09T03:27:13,777 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/hbase.id] with ID: bad9a242-0de4-4b99-bb13-03c40e2fc514 2024-12-09T03:27:13,777 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/.tmp/hbase.id 2024-12-09T03:27:13,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:27:13,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:27:13,783 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/.tmp/hbase.id]:[hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/hbase.id] 2024-12-09T03:27:13,794 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:13,794 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:27:13,796 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
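The FSUtils entries just above show the active master publishing the cluster ID by writing it to a temporary file under .tmp and then moving it to its final name, so no reader ever sees a partially written hbase.id. Below is a minimal sketch of that write-then-rename pattern using the stock Hadoop FileSystem API; the paths and the writeClusterId helper are illustrative assumptions, not HBase's actual FSUtils code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdSketch {
  // Hypothetical helper: write the ID to a temp file, then rename it into place.
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path idFile = new Path(rootDir, "hbase.id");
    Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // The rename is what makes the ID visible; readers either see the old state or the full file.
    if (!fs.rename(tmpFile, idFile)) {
      throw new IOException("Could not move " + tmpFile + " to " + idFile);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Example root dir; in the log this is the test-data directory on the mini DFS cluster.
    writeClusterId(fs, new Path("/tmp/hbase-example"), UUID.randomUUID().toString());
  }
}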
2024-12-09T03:27:13,803 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:13,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:27:13,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:27:14,210 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:27:14,211 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:27:14,211 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:27:14,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:27:14,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:27:14,223 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store 2024-12-09T03:27:14,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:27:14,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:27:14,229 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:14,229 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:27:14,230 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:14,230 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:14,230 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:27:14,230 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:27:14,230 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
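The MasterRegion builds the 'master:store' descriptor above internally, but the logged attributes map directly onto the public HBase client builders. The sketch below shows how an equivalent 'info' and 'proc' family would be expressed with ColumnFamilyDescriptorBuilder and TableDescriptorBuilder, for readers who want to relate VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and DATA_BLOCK_ENCODING back to API calls; the "example:store" table name is an illustrative stand-in, since the real master-local region is never created through the client API.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the logged 'info' family: VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1',
    // BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // The 'proc' family uses the plainer settings shown in the log:
    // VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)'.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}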
2024-12-09T03:27:14,230 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714834229Disabling compacts and flushes for region at 1733714834229Disabling writes for close at 1733714834230 (+1 ms)Writing region close event to WAL at 1733714834230Closed at 1733714834230 2024-12-09T03:27:14,231 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/.initializing 2024-12-09T03:27:14,231 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/WALs/1617b0b1421f,38129,1733714833477 2024-12-09T03:27:14,233 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C38129%2C1733714833477, suffix=, logDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/WALs/1617b0b1421f,38129,1733714833477, archiveDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/oldWALs, maxLogs=10 2024-12-09T03:27:14,234 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C38129%2C1733714833477.1733714834233 2024-12-09T03:27:14,239 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/WALs/1617b0b1421f,38129,1733714833477/1617b0b1421f%2C38129%2C1733714833477.1733714834233 2024-12-09T03:27:14,240 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35319:35319),(127.0.0.1/127.0.0.1:35443:35443)] 2024-12-09T03:27:14,241 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:27:14,241 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:14,241 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,241 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:27:14,244 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:14,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:27:14,246 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:14,247 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:27:14,248 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:14,248 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:27:14,250 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:14,250 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,251 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,251 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,252 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,252 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,252 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:27:14,253 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:27:14,255 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:27:14,256 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807234, jitterRate=0.026452332735061646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:27:14,257 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714834241Initializing all the Stores at 1733714834242 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714834242Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714834243 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714834243Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714834243Cleaning up temporary data from old regions at 1733714834252 (+9 ms)Region opened successfully at 1733714834256 (+4 ms) 2024-12-09T03:27:14,261 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:27:14,265 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74723270, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:27:14,265 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:27:14,266 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:27:14,266 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:27:14,266 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:27:14,267 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:27:14,267 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:27:14,267 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:27:14,270 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:27:14,271 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:27:14,318 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:27:14,319 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:27:14,319 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:27:14,329 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:27:14,329 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:27:14,330 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:27:14,339 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:27:14,340 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:27:14,350 DEBUG 
[master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:27:14,352 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:27:14,360 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:27:14,371 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:27:14,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:27:14,371 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,373 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,38129,1733714833477, sessionid=0x100089cf6ff0000, setting cluster-up flag (Was=false) 2024-12-09T03:27:14,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,392 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,424 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:27:14,425 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,38129,1733714833477 2024-12-09T03:27:14,445 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:14,487 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:27:14,488 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,38129,1733714833477 2024-12-09T03:27:14,490 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:27:14,491 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:27:14,491 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:27:14,492 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T03:27:14,492 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,38129,1733714833477 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:27:14,493 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:27:14,493 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:27:14,493 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:27:14,493 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:27:14,494 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:27:14,494 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,494 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:27:14,494 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T03:27:14,496 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:27:14,496 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714864496 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:27:14,497 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
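The cleaner entries above (LogsCleaner chore with period=600000 ms, one old-WALs cleaner thread, and the TimeToLiveLogCleaner/ReplicationLogCleaner plugins) are all driven by configuration. A minimal sketch of how those knobs would be set programmatically follows; the property names are taken from the stock hbase-default.xml as best recalled and should be treated as assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CleanerChoreConfigSketch {
  public static Configuration tuneCleaners() {
    Configuration conf = HBaseConfiguration.create();
    // Period of the master cleaner chores; 600000 ms matches the LogsCleaner period in the log.
    conf.setInt("hbase.master.cleaner.interval", 600_000);
    // Log-cleaner plugins the chore runs in order; both class names appear verbatim in the log
    // (the full list in the log also includes the master-local-store and procedure WAL cleaners).
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    // Number of old-WALs cleaner threads ("Creating 1 old WALs cleaner threads" in the log).
    conf.setInt("hbase.oldwals.cleaner.thread.size", 1);
    return conf;
  }
}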
2024-12-09T03:27:14,498 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,498 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:27:14,498 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:27:14,499 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:27:14,500 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:27:14,505 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:27:14,505 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:27:14,505 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714834505,5,FailOnTimeoutGroup] 2024-12-09T03:27:14,506 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714834505,5,FailOnTimeoutGroup] 2024-12-09T03:27:14,506 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,506 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:27:14,506 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,506 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:27:14,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:27:14,508 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:27:14,509 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115 2024-12-09T03:27:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:27:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:27:14,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:14,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:27:14,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:27:14,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:14,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:27:14,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:27:14,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:14,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:27:14,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:27:14,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:14,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:27:14,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:27:14,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:14,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:14,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:27:14,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740 2024-12-09T03:27:14,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740 2024-12-09T03:27:14,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:27:14,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:27:14,528 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:27:14,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:27:14,530 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(746): ClusterId : bad9a242-0de4-4b99-bb13-03c40e2fc514 2024-12-09T03:27:14,530 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:27:14,533 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:27:14,533 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812304, jitterRate=0.03289872407913208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:27:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714834519Initializing all the Stores at 1733714834519Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714834519Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714834520 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714834520Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714834520Cleaning up temporary data from old regions at 1733714834528 (+8 ms)Region opened successfully at 1733714834534 (+6 ms) 2024-12-09T03:27:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:27:14,534 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:27:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:27:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:27:14,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:27:14,535 
INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:27:14,535 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714834534Disabling compacts and flushes for region at 1733714834534Disabling writes for close at 1733714834534Writing region close event to WAL at 1733714834535 (+1 ms)Closed at 1733714834535 2024-12-09T03:27:14,536 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:27:14,536 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:27:14,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:27:14,538 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:27:14,538 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:27:14,540 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:27:14,540 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:27:14,551 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:27:14,551 DEBUG [RS:0;1617b0b1421f:44043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a9eca0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:27:14,562 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:44043 2024-12-09T03:27:14,562 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:27:14,562 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:27:14,562 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(832): About to register with Master. 
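The two RegionServerCoprocessorHost lines just above reflect the coprocessor loading switches. A minimal sketch of toggling them through the client Configuration, assuming the standard property names (hbase.coprocessor.enabled and hbase.coprocessor.user.enabled) rather than anything read from this test's files:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CoprocessorSwitchSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // "System coprocessor loading is enabled" above corresponds to this switch being true.
    conf.setBoolean("hbase.coprocessor.enabled", true);
    // "Table coprocessor loading is enabled" above corresponds to the per-table (user) switch.
    conf.setBoolean("hbase.coprocessor.user.enabled", true);
    return conf;
  }
}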
2024-12-09T03:27:14,563 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,38129,1733714833477 with port=44043, startcode=1733714833682 2024-12-09T03:27:14,563 DEBUG [RS:0;1617b0b1421f:44043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:27:14,565 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52375, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:27:14,565 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38129 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,566 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38129 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,567 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115 2024-12-09T03:27:14,567 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34819 2024-12-09T03:27:14,567 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:27:14,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:27:14,580 DEBUG [RS:0;1617b0b1421f:44043 {}] zookeeper.ZKUtil(111): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,581 WARN [RS:0;1617b0b1421f:44043 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:27:14,581 INFO [RS:0;1617b0b1421f:44043 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:27:14,581 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,581 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,44043,1733714833682] 2024-12-09T03:27:14,584 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:27:14,585 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:27:14,585 INFO [RS:0;1617b0b1421f:44043 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:27:14,586 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
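A minimal sketch of settings echoed in the registration block above (hbase.master.info.port=-1, the FSHLogProvider WAL, the roughly 880 M global memstore limit). The property names are the usual HBase ones; the 0.4 heap fraction is an assumed default, not a value read from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerConfSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Disable the master info web UI, matching "hbase.master.info.port=-1" logged above.
    conf.setInt("hbase.master.info.port", -1);
    // Select the FSHLog WAL provider reported by WALFactory above.
    conf.set("hbase.wal.provider", "filesystem");
    // Heap fraction from which MemStoreFlusher derives the global memstore limit (880 M here).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    return conf;
  }
}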
2024-12-09T03:27:14,586 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:27:14,587 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:27:14,587 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,587 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,588 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,588 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:27:14,588 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:27:14,588 DEBUG [RS:0;1617b0b1421f:44043 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
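Several of the "Chore ScheduledChore ... is enabled" lines above and below come from ChoreService.scheduleChore. A rough sketch of that internal API, with class and constructor signatures as recalled from the HBase source and therefore to be treated as assumptions:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore can be cancelled.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // Runs every 1000 ms, like the CompactionChecker chore logged above.
    service.scheduleChore(new ScheduledChore("ExampleChecker", stopper, 1000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    });
  }
}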
2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,588 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44043,1733714833682-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:27:14,607 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:27:14,607 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,44043,1733714833682-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,607 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,607 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.Replication(171): 1617b0b1421f,44043,1733714833682 started 2024-12-09T03:27:14,623 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:14,623 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,44043,1733714833682, RpcServer on 1617b0b1421f/172.17.0.3:44043, sessionid=0x100089cf6ff0001 2024-12-09T03:27:14,623 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:27:14,623 DEBUG [RS:0;1617b0b1421f:44043 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,623 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,44043,1733714833682' 2024-12-09T03:27:14,623 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:27:14,623 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,44043,1733714833682' 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:27:14,624 DEBUG 
[RS:0;1617b0b1421f:44043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:27:14,624 DEBUG [RS:0;1617b0b1421f:44043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:27:14,624 INFO [RS:0;1617b0b1421f:44043 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:27:14,624 INFO [RS:0;1617b0b1421f:44043 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:27:14,689 WARN [1617b0b1421f:38129 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:27:14,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:14,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:14,726 INFO [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C44043%2C1733714833682, suffix=, logDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682, archiveDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/oldWALs, maxLogs=32 2024-12-09T03:27:14,727 INFO [RS:0;1617b0b1421f:44043 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C44043%2C1733714833682.1733714834726 2024-12-09T03:27:14,731 INFO [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682/1617b0b1421f%2C44043%2C1733714833682.1733714834726 2024-12-09T03:27:14,732 DEBUG [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35443:35443),(127.0.0.1/127.0.0.1:35319:35319)] 2024-12-09T03:27:14,939 DEBUG [1617b0b1421f:38129 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:27:14,940 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:14,941 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,44043,1733714833682, state=OPENING 2024-12-09T03:27:14,992 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:27:15,003 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:15,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:27:15,004 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:27:15,005 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:27:15,005 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:27:15,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,44043,1733714833682}] 2024-12-09T03:27:15,161 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:27:15,164 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45183, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:27:15,169 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:27:15,169 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:27:15,172 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C44043%2C1733714833682.meta, suffix=.meta, logDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682, archiveDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/oldWALs, maxLogs=32 2024-12-09T03:27:15,173 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C44043%2C1733714833682.meta.1733714835172.meta 2024-12-09T03:27:15,178 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682/1617b0b1421f%2C44043%2C1733714833682.meta.1733714835172.meta 2024-12-09T03:27:15,181 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35443:35443),(127.0.0.1/127.0.0.1:35319:35319)] 2024-12-09T03:27:15,182 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
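The "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" WAL configuration above follows from the usual WAL properties, with rollsize computed as blocksize times the roll multiplier. A sketch of that relation; the fallback defaults passed to the getters are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    // rollsize = blocksize * multiplier -> 256 MB * 0.5 = 128 MB, matching the log line above.
    long rollSize = (long) (blockSize * multiplier);
    System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
  }
}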
2024-12-09T03:27:15,182 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:27:15,182 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:27:15,182 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T03:27:15,182 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:27:15,183 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:15,183 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:27:15,183 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:27:15,184 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:27:15,185 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:27:15,185 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:15,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:27:15,186 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:27:15,186 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:15,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:27:15,188 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:27:15,188 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:15,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:27:15,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:27:15,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:27:15,189 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:27:15,190 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740 2024-12-09T03:27:15,191 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740 2024-12-09T03:27:15,192 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:27:15,192 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:27:15,192 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
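The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta, so the policy falls back to the region flush size divided by the number of column families. A sketch of setting that bound explicitly on an ordinary table via the standard client API; the table name and the 16 MB value are illustrative assumptions:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // A column family must hold at least this many bytes before a selective flush picks it.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}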
2024-12-09T03:27:15,193 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:27:15,194 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773132, jitterRate=-0.01691286265850067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:27:15,194 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:27:15,194 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714835183Writing region info on filesystem at 1733714835183Initializing all the Stores at 1733714835184 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714835184Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714835184Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714835184Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714835184Cleaning up temporary data from old regions at 1733714835192 (+8 ms)Running coprocessor post-open hooks at 1733714835194 (+2 ms)Region opened successfully at 1733714835194 2024-12-09T03:27:15,195 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714835161 2024-12-09T03:27:15,197 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:27:15,197 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:27:15,198 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:15,199 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,44043,1733714833682, state=OPEN 2024-12-09T03:27:15,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,308 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:27:15,321 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:27:15,321 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:15,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:27:15,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:27:15,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:27:15,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,44043,1733714833682 in 316 msec 2024-12-09T03:27:15,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:27:15,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 788 msec 2024-12-09T03:27:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,327 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:27:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,327 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:27:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,328 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:27:15,328 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,44043,1733714833682, seqNum=-1] 2024-12-09T03:27:15,328 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:27:15,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50225, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:27:15,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 843 msec 2024-12-09T03:27:15,335 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714835335, completionTime=-1 2024-12-09T03:27:15,335 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:27:15,335 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:27:15,337 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:27:15,337 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714895337 2024-12-09T03:27:15,337 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733714955337 2024-12-09T03:27:15,337 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:38129, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,338 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,340 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.571sec 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
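The "Quota support disabled" lines above and below mean hbase.quota.enabled is false in this test. A sketch of the switch a deployment would flip so MasterQuotaManager and the region server quota managers start; this is illustrative and not part of this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaEnableSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // With this set, the master and region servers enable RPC/space quota management.
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}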
2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:27:15,342 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:27:15,344 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:27:15,344 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:27:15,344 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,38129,1733714833477-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:27:15,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@303a8bec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:27:15,431 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,38129,-1 for getting cluster id 2024-12-09T03:27:15,431 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:27:15,433 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'bad9a242-0de4-4b99-bb13-03c40e2fc514' 2024-12-09T03:27:15,433 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:27:15,434 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "bad9a242-0de4-4b99-bb13-03c40e2fc514" 2024-12-09T03:27:15,434 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f9b4adb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:27:15,434 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,38129,-1] 2024-12-09T03:27:15,435 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:27:15,435 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:27:15,436 
INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:27:15,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40b03f71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:27:15,438 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:27:15,439 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,44043,1733714833682, seqNum=-1] 2024-12-09T03:27:15,440 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:27:15,441 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:27:15,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,38129,1733714833477 2024-12-09T03:27:15,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:27:15,446 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:27:15,446 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T03:27:15,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 1617b0b1421f,38129,1733714833477 2024-12-09T03:27:15,447 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4cbab6a4 2024-12-09T03:27:15,448 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T03:27:15,449 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39180, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T03:27:15,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T03:27:15,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
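[Editor's note] The two TableDescriptorChecker warnings above show that the test runs with deliberately tiny region and memstore thresholds (hbase.hregion.max.filesize = 786432, hbase.hregion.memstore.flush.size = 8192) so that flushes, WAL rolls and splits happen quickly. A minimal sketch of setting such values on a test Configuration follows; the property names and values come from the warnings, everything else is illustrative rather than the test's actual code.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class TinyRegionConf {
    public static Configuration create() {
      Configuration conf = HBaseConfiguration.create();
      // Far too small for production; forces frequent flushes and splits in a test.
      conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB max region size
      conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore flush threshold
      return conf;
    }
  }
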
2024-12-09T03:27:15,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:27:15,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T03:27:15,454 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T03:27:15,454 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T03:27:15,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:27:15,455 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T03:27:15,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741835_1011 (size=381) 2024-12-09T03:27:15,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741835_1011 (size=381) 2024-12-09T03:27:15,463 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d4e2ff6e64d18ee11bc1992b0fcdca97, NAME => 'TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115 2024-12-09T03:27:15,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741836_1012 (size=64) 2024-12-09T03:27:15,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741836_1012 (size=64) 2024-12-09T03:27:15,468 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:15,469 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d4e2ff6e64d18ee11bc1992b0fcdca97, disabling compactions & flushes 2024-12-09T03:27:15,469 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,469 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,469 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. after waiting 0 ms 2024-12-09T03:27:15,469 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,469 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,469 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d4e2ff6e64d18ee11bc1992b0fcdca97: Waiting for close lock at 1733714835469Disabling compacts and flushes for region at 1733714835469Disabling writes for close at 1733714835469Writing region close event to WAL at 1733714835469Closed at 1733714835469 2024-12-09T03:27:15,470 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T03:27:15,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733714835470"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714835470"}]},"ts":"1733714835470"} 2024-12-09T03:27:15,472 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
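[Editor's note] The entries above show the master's CreateTableProcedure (pid=4) writing the FS layout for 'TestLogRolling-testLogRolling' and adding its single region to hbase:meta. For reference, a client-side equivalent of the descriptor printed in the log (one 'info' family, VERSIONS=1, BLOOMFILTER=ROW, 64 KB blocks, no compression) could be built with the standard Admin API roughly as sketched below; this is a hedged illustration, not the code path the test actually uses.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateTestTable {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Column family settings mirror the descriptor logged for pid=4:
        // one version, ROW bloom filter, 64 KB blocks, no compression.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(65536)
                .build())
            .build();
        admin.createTable(td);
      }
    }
  }
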
2024-12-09T03:27:15,473 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T03:27:15,473 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714835473"}]},"ts":"1733714835473"} 2024-12-09T03:27:15,475 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T03:27:15,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, ASSIGN}] 2024-12-09T03:27:15,477 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, ASSIGN 2024-12-09T03:27:15,478 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, ASSIGN; state=OFFLINE, location=1617b0b1421f,44043,1733714833682; forceNewPlan=false, retain=false 2024-12-09T03:27:15,629 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d4e2ff6e64d18ee11bc1992b0fcdca97, regionState=OPENING, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:15,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, ASSIGN because future has completed 2024-12-09T03:27:15,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682}] 2024-12-09T03:27:15,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:15,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:15,790 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,791 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d4e2ff6e64d18ee11bc1992b0fcdca97, NAME => 'TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:27:15,791 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,791 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:15,791 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,791 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,794 INFO [StoreOpener-d4e2ff6e64d18ee11bc1992b0fcdca97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,796 INFO [StoreOpener-d4e2ff6e64d18ee11bc1992b0fcdca97-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d4e2ff6e64d18ee11bc1992b0fcdca97 columnFamilyName info 2024-12-09T03:27:15,796 DEBUG [StoreOpener-d4e2ff6e64d18ee11bc1992b0fcdca97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:15,797 INFO 
[StoreOpener-d4e2ff6e64d18ee11bc1992b0fcdca97-1 {}] regionserver.HStore(327): Store=d4e2ff6e64d18ee11bc1992b0fcdca97/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:15,797 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,798 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,798 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,799 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,799 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,801 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,803 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:27:15,804 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d4e2ff6e64d18ee11bc1992b0fcdca97; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769464, jitterRate=-0.021576672792434692}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:27:15,804 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:15,805 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d4e2ff6e64d18ee11bc1992b0fcdca97: Running coprocessor pre-open hook at 1733714835792Writing region info on filesystem at 1733714835792Initializing all the Stores at 1733714835793 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714835793Cleaning up temporary data from old regions at 1733714835799 (+6 ms)Running coprocessor post-open 
hooks at 1733714835804 (+5 ms)Region opened successfully at 1733714835805 (+1 ms) 2024-12-09T03:27:15,806 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., pid=6, masterSystemTime=1733714835785 2024-12-09T03:27:15,808 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,808 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:15,808 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d4e2ff6e64d18ee11bc1992b0fcdca97, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:15,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 because future has completed 2024-12-09T03:27:15,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T03:27:15,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 in 179 msec 2024-12-09T03:27:15,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T03:27:15,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, ASSIGN in 338 msec 2024-12-09T03:27:15,816 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T03:27:15,817 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733714835816"}]},"ts":"1733714835816"} 2024-12-09T03:27:15,819 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T03:27:15,821 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T03:27:15,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 370 msec 2024-12-09T03:27:15,838 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:27:15,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric 
collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,843 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,872 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,872 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,877 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:15,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:16,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:16,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:16,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T03:27:16,745 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T03:27:16,746 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T03:27:17,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:17,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:18,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:18,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:19,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:19,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:20,584 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:27:20,584 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T03:27:20,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:20,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:21,293 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,295 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,296 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,313 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,319 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:21,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:21,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:22,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:22,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:23,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:23,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:24,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:24,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:25,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38129 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T03:27:25,492 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-09T03:27:25,493 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-09T03:27:25,496 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-09T03:27:25,497 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 
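The repeated Close-WAL-Writer-0 warnings above and below all show the same failure mode: the WAL close path keeps retrying lease recovery after the test's HDFS client has already been shut down. RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, the reflective call throws InvocationTargetException, and the unwrapped cause is the IOException("Filesystem closed") raised by DFSClient.checkOpen. The snippet below is only a minimal, hypothetical sketch of that reflective probe pattern, not the actual RecoverLeaseFSUtils code; the class name IsFileClosedProbe and its error handling are assumptions made for illustration.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hedged illustration only: NOT the HBase implementation, just the shape of the
// reflective isFileClosed probe that the stack traces in this log show failing.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  // Returns true only when HDFS reports the file as closed; false when the probe
  // cannot be made (non-HDFS filesystem, or the underlying client is already closed).
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    Method isFileClosed;
    try {
      // isFileClosed(Path) is declared on DistributedFileSystem, not on FileSystem,
      // so it is looked up reflectively against the concrete filesystem class.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not a DistributedFileSystem, nothing to probe
    }
    try {
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // The case logged above: the real error is wrapped, and its cause is
      // java.io.IOException: Filesystem closed, thrown by DFSClient.checkOpen because
      // the mini-cluster filesystem was closed before the WAL writer finished closing.
      if (e.getCause() instanceof IOException) {
        return false; // the caller's retry loop logs a WARN and tries again later
      }
      throw new IllegalStateException(e);
    }
  }
}
```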
2024-12-09T03:27:25,501 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2] 2024-12-09T03:27:25,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:25,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:27:25,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/06f7a9e333ab417c81faefd291b43477 is 1080, key is row0001/info:/1733714845502/Put/seqid=0 2024-12-09T03:27:25,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741837_1013 (size=12509) 2024-12-09T03:27:25,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741837_1013 (size=12509) 2024-12-09T03:27:25,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/06f7a9e333ab417c81faefd291b43477 2024-12-09T03:27:25,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/06f7a9e333ab417c81faefd291b43477 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477 2024-12-09T03:27:25,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T03:27:25,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 38ms, sequenceid=11, compaction requested=false 2024-12-09T03:27:25,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:25,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:25,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T03:27:25,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/284cf55b3799468189354d6fc5e87673 is 1080, key is row0008/info:/1733714845518/Put/seqid=0 2024-12-09T03:27:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741838_1014 (size=24376) 2024-12-09T03:27:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741838_1014 (size=24376) 2024-12-09T03:27:25,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/284cf55b3799468189354d6fc5e87673 2024-12-09T03:27:25,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/284cf55b3799468189354d6fc5e87673 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673 2024-12-09T03:27:25,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673, entries=18, sequenceid=32, filesize=23.8 K 2024-12-09T03:27:25,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 23ms, sequenceid=32, compaction requested=false 2024-12-09T03:27:25,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:25,579 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.0 K, sizeToCheck=16.0 K 2024-12-09T03:27:25,579 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:25,579 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673 because midkey is the same as first or last row 2024-12-09T03:27:25,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:25,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:26,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:26,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:27,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:27,576 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T03:27:27,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/e02912d210fc4fc1998c0054ae7ddb70 is 1080, key is row0026/info:/1733714845557/Put/seqid=0 2024-12-09T03:27:27,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741839_1015 (size=13586) 2024-12-09T03:27:27,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741839_1015 (size=13586) 2024-12-09T03:27:27,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T03:27:27,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:49000 deadline: 1733714857620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:27,645 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T03:27:27,645 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T03:27:27,645 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 because the exception is null or not the one we care about 2024-12-09T03:27:27,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:27,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:27,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/e02912d210fc4fc1998c0054ae7ddb70 2024-12-09T03:27:27,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/e02912d210fc4fc1998c0054ae7ddb70 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70 2024-12-09T03:27:28,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70, entries=8, sequenceid=43, filesize=13.3 K 2024-12-09T03:27:28,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=22.07 KB/22596 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 428ms, sequenceid=43, compaction requested=true 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673 because midkey is the same as first or last row 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d4e2ff6e64d18ee11bc1992b0fcdca97:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:27:28,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:28,005 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:27:28,006 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:27:28,006 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): d4e2ff6e64d18ee11bc1992b0fcdca97/info is initiating minor compaction (all files) 2024-12-09T03:27:28,006 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d4e2ff6e64d18ee11bc1992b0fcdca97/info in 
TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:28,006 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp, totalSize=49.3 K 2024-12-09T03:27:28,006 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06f7a9e333ab417c81faefd291b43477, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733714845502 2024-12-09T03:27:28,007 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 284cf55b3799468189354d6fc5e87673, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1733714845518 2024-12-09T03:27:28,007 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting e02912d210fc4fc1998c0054ae7ddb70, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733714845557 2024-12-09T03:27:28,018 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d4e2ff6e64d18ee11bc1992b0fcdca97#info#compaction#57 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:28,018 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/5b008c2667bf4d56adc06951d479b11a is 1080, key is row0001/info:/1733714845502/Put/seqid=0 2024-12-09T03:27:28,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741840_1016 (size=40670) 2024-12-09T03:27:28,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741840_1016 (size=40670) 2024-12-09T03:27:28,041 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/5b008c2667bf4d56adc06951d479b11a as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a 2024-12-09T03:27:28,047 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d4e2ff6e64d18ee11bc1992b0fcdca97/info of d4e2ff6e64d18ee11bc1992b0fcdca97 into 5b008c2667bf4d56adc06951d479b11a(size=39.7 K), total size for store is 39.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:28,047 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., storeName=d4e2ff6e64d18ee11bc1992b0fcdca97/info, priority=13, startTime=1733714848004; duration=0sec 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a because midkey is the same as first or last row 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a because midkey is the same as first or last row 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.7 K, sizeToCheck=16.0 K 2024-12-09T03:27:28,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:28,048 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a because midkey is the same as first or last row 2024-12-09T03:27:28,048 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:28,048 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d4e2ff6e64d18ee11bc1992b0fcdca97:info 2024-12-09T03:27:28,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:28,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:29,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:29,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:30,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:30,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:31,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:31,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:32,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:32,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:33,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:33,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:34,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:34,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:35,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:35,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:36,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:36,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T03:27:37,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on d4e2ff6e64d18ee11bc1992b0fcdca97
2024-12-09T03:27:37,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=23.12 KB heapSize=25 KB
2024-12-09T03:27:37,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/d9300bc2983840e895ac8e8f8eefec04 is 1080, key is row0034/info:/1733714847578/Put/seqid=0
2024-12-09T03:27:37,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741841_1017 (size=28684)
2024-12-09T03:27:37,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741841_1017 (size=28684)
2024-12-09T03:27:37,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=69 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/d9300bc2983840e895ac8e8f8eefec04
2024-12-09T03:27:37,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T03:27:37,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:49000 deadline: 1733714867721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682
2024-12-09T03:27:37,722 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T03:27:37,722 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 is
org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T03:27:37,722 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 because the exception is null or not the one we care about 2024-12-09T03:27:37,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/d9300bc2983840e895ac8e8f8eefec04 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04 2024-12-09T03:27:37,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04, entries=22, sequenceid=69, filesize=28.0 K 2024-12-09T03:27:37,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=7.36 KB/7532 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 27ms, sequenceid=69, compaction requested=false 2024-12-09T03:27:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.7 K, sizeToCheck=16.0 K 2024-12-09T03:27:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a because midkey is the same as first or last row 2024-12-09T03:27:37,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:37,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:38,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:38,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:39,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:39,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:40,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:40,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:41,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:41,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:42,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:42,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:43,462 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T03:27:43,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:43,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:44,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:44,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:45,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:45,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:46,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:46,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:47,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:47,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:47,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:47,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T03:27:47,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/b101a126b95843b8b5ee0329d4b3643c is 1080, key is row0056/info:/1733714857705/Put/seqid=0 2024-12-09T03:27:47,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741842_1018 (size=13586) 2024-12-09T03:27:47,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741842_1018 (size=13586) 2024-12-09T03:27:47,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/b101a126b95843b8b5ee0329d4b3643c 2024-12-09T03:27:47,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/b101a126b95843b8b5ee0329d4b3643c as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c 2024-12-09T03:27:47,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c, entries=8, sequenceid=80, filesize=13.3 K 2024-12-09T03:27:47,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=1.05 KB/1076 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 31ms, sequenceid=80, compaction requested=true 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a because midkey is the same as first or last row 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d4e2ff6e64d18ee11bc1992b0fcdca97:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-09T03:27:47,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:47,815 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:27:47,816 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82940 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:27:47,817 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): d4e2ff6e64d18ee11bc1992b0fcdca97/info is initiating minor compaction (all files) 2024-12-09T03:27:47,817 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d4e2ff6e64d18ee11bc1992b0fcdca97/info in TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:47,817 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp, totalSize=81.0 K 2024-12-09T03:27:47,817 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5b008c2667bf4d56adc06951d479b11a, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733714845502 2024-12-09T03:27:47,818 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9300bc2983840e895ac8e8f8eefec04, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=69, earliestPutTs=1733714847578 2024-12-09T03:27:47,818 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting b101a126b95843b8b5ee0329d4b3643c, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733714857705 2024-12-09T03:27:47,832 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d4e2ff6e64d18ee11bc1992b0fcdca97#info#compaction#60 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:47,832 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/35f784d9e6a74f55a6780bf06c220784 is 1080, key is row0001/info:/1733714845502/Put/seqid=0 2024-12-09T03:27:47,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741843_1019 (size=73224) 2024-12-09T03:27:47,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741843_1019 (size=73224) 2024-12-09T03:27:47,850 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/35f784d9e6a74f55a6780bf06c220784 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784 2024-12-09T03:27:47,857 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d4e2ff6e64d18ee11bc1992b0fcdca97/info of d4e2ff6e64d18ee11bc1992b0fcdca97 into 35f784d9e6a74f55a6780bf06c220784(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d4e2ff6e64d18ee11bc1992b0fcdca97: 2024-12-09T03:27:47,857 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., storeName=d4e2ff6e64d18ee11bc1992b0fcdca97/info, priority=13, startTime=1733714867815; duration=0sec 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-12-09T03:27:47,857 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T03:27:47,858 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:47,858 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:47,858 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d4e2ff6e64d18ee11bc1992b0fcdca97:info 2024-12-09T03:27:47,860 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38129 {}] assignment.AssignmentManager(1363): Split request from 1617b0b1421f,44043,1733714833682, parent={ENCODED => d4e2ff6e64d18ee11bc1992b0fcdca97, NAME => 'TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T03:27:47,866 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38129 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:47,870 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38129 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d4e2ff6e64d18ee11bc1992b0fcdca97, daughterA=e97476aa668f8fba455fa1666a01664d, daughterB=784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:47,872 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d4e2ff6e64d18ee11bc1992b0fcdca97, daughterA=e97476aa668f8fba455fa1666a01664d, daughterB=784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:47,872 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d4e2ff6e64d18ee11bc1992b0fcdca97, daughterA=e97476aa668f8fba455fa1666a01664d, daughterB=784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:47,872 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d4e2ff6e64d18ee11bc1992b0fcdca97, daughterA=e97476aa668f8fba455fa1666a01664d, daughterB=784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:47,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, UNASSIGN}] 2024-12-09T03:27:47,880 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, UNASSIGN 2024-12-09T03:27:47,882 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d4e2ff6e64d18ee11bc1992b0fcdca97, regionState=CLOSING, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:47,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, UNASSIGN because future has completed 2024-12-09T03:27:47,885 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T03:27:47,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682}] 2024-12-09T03:27:48,043 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,043 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T03:27:48,043 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing d4e2ff6e64d18ee11bc1992b0fcdca97, disabling compactions & flushes 2024-12-09T03:27:48,043 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:48,043 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:48,044 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. after waiting 0 ms 2024-12-09T03:27:48,044 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 
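The split recorded above (pid=7, splitKey=row0062, daughters e97476aa668f8fba455fa1666a01664d and 784580dedcd7bf1485608e5fe5f1f043) is triggered automatically: even after the minor compaction the store is still 71.5 K against the 16.0 K split check, so CompactSplit files a split request, the region server (1617b0b1421f,44043) sends it to the master's AssignmentManager, and SplitTableRegionProcedure begins by unassigning and closing the parent region as shown in the surrounding entries. For reference only, the same kind of split can also be requested explicitly through the public HBase Admin API; the following is a minimal, hypothetical client-side sketch (the table name and split key are taken from this run, while the ZooKeeper quorum address is an assumption and not part of this log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ManualSplitSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            // Assumption: quorum address of the test cluster; replace with the real one.
            conf.set("hbase.zookeeper.quorum", "localhost");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the master to split the region containing "row0062", mirroring
                // the policy-triggered split at splitKey=row0062 in the log above.
                admin.split(TableName.valueOf("TestLogRolling-testLogRolling"),
                        Bytes.toBytes("row0062"));
            }
        }
    }

In this log no such client call is involved; the close and pre-close flush entries that follow are the first steps of the master-driven procedure.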
2024-12-09T03:27:48,044 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing d4e2ff6e64d18ee11bc1992b0fcdca97 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T03:27:48,049 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/be68c05c7c7248c69d89116c8616a615 is 1080, key is row0064/info:/1733714867786/Put/seqid=0 2024-12-09T03:27:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741844_1020 (size=6033) 2024-12-09T03:27:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741844_1020 (size=6033) 2024-12-09T03:27:48,054 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/be68c05c7c7248c69d89116c8616a615 2024-12-09T03:27:48,060 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/.tmp/info/be68c05c7c7248c69d89116c8616a615 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/be68c05c7c7248c69d89116c8616a615 2024-12-09T03:27:48,065 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/be68c05c7c7248c69d89116c8616a615, entries=1, sequenceid=85, filesize=5.9 K 2024-12-09T03:27:48,066 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 22ms, sequenceid=85, compaction requested=false 2024-12-09T03:27:48,067 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c] to archive 2024-12-09T03:27:48,068 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:27:48,070 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/06f7a9e333ab417c81faefd291b43477 2024-12-09T03:27:48,072 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/284cf55b3799468189354d6fc5e87673 2024-12-09T03:27:48,073 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/5b008c2667bf4d56adc06951d479b11a 2024-12-09T03:27:48,075 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/e02912d210fc4fc1998c0054ae7ddb70 2024-12-09T03:27:48,076 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04 to 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/d9300bc2983840e895ac8e8f8eefec04 2024-12-09T03:27:48,077 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/b101a126b95843b8b5ee0329d4b3643c 2024-12-09T03:27:48,082 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-12-09T03:27:48,083 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 2024-12-09T03:27:48,083 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for d4e2ff6e64d18ee11bc1992b0fcdca97: Waiting for close lock at 1733714868043Running coprocessor pre-close hooks at 1733714868043Disabling compacts and flushes for region at 1733714868043Disabling writes for close at 1733714868044 (+1 ms)Obtaining lock to block concurrent updates at 1733714868044Preparing flush snapshotting stores in d4e2ff6e64d18ee11bc1992b0fcdca97 at 1733714868044Finished memstore snapshotting TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733714868044Flushing stores of TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 
at 1733714868045 (+1 ms)Flushing d4e2ff6e64d18ee11bc1992b0fcdca97/info: creating writer at 1733714868045Flushing d4e2ff6e64d18ee11bc1992b0fcdca97/info: appending metadata at 1733714868048 (+3 ms)Flushing d4e2ff6e64d18ee11bc1992b0fcdca97/info: closing flushed file at 1733714868048Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66cf3e81: reopening flushed file at 1733714868059 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d4e2ff6e64d18ee11bc1992b0fcdca97 in 22ms, sequenceid=85, compaction requested=false at 1733714868066 (+7 ms)Writing region close event to WAL at 1733714868079 (+13 ms)Running coprocessor post-close hooks at 1733714868083 (+4 ms)Closed at 1733714868083 2024-12-09T03:27:48,085 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,086 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d4e2ff6e64d18ee11bc1992b0fcdca97, regionState=CLOSED 2024-12-09T03:27:48,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 because future has completed 2024-12-09T03:27:48,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-09T03:27:48,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure d4e2ff6e64d18ee11bc1992b0fcdca97, server=1617b0b1421f,44043,1733714833682 in 204 msec 2024-12-09T03:27:48,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T03:27:48,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d4e2ff6e64d18ee11bc1992b0fcdca97, UNASSIGN in 213 msec 2024-12-09T03:27:48,102 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:48,105 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=d4e2ff6e64d18ee11bc1992b0fcdca97, threads=2 2024-12-09T03:27:48,107 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/be68c05c7c7248c69d89116c8616a615 for region: d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,107 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784 for region: d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,119 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/be68c05c7c7248c69d89116c8616a615, top=true 2024-12-09T03:27:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741845_1021 (size=27) 2024-12-09T03:27:48,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741845_1021 (size=27) 2024-12-09T03:27:48,132 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615 for child: 784580dedcd7bf1485608e5fe5f1f043, parent: d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,132 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/be68c05c7c7248c69d89116c8616a615 for region: d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741846_1022 (size=27) 2024-12-09T03:27:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741846_1022 (size=27) 2024-12-09T03:27:48,142 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784 for region: d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:27:48,144 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region d4e2ff6e64d18ee11bc1992b0fcdca97 Daughter A: [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97] storefiles, Daughter B: [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615] storefiles. 
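The split output above shows the two file-name shapes the daughters end up with: daughter A references the bottom half of the parent file as "<hfile>.<parentEncodedRegion>", while daughter B gets an HFileLink named "<table>=<parentEncodedRegion>-<hfile>". A small, purely illustrative helper that rebuilds those names from their parts; the method names are made up for the sketch and are not the HBase API.

    // Illustrative only: reconstructs the two file-name shapes visible in the
    // split paths above, a half reference and an HFileLink.
    public class SplitNameSketch {
        static String referenceName(String hfile, String parentEncodedRegion) {
            return hfile + "." + parentEncodedRegion;
        }

        static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
            return table + "=" + parentEncodedRegion + "-" + hfile;
        }

        public static void main(String[] args) {
            String parent = "d4e2ff6e64d18ee11bc1992b0fcdca97";
            // Matches ".../e97476.../info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97"
            System.out.println(referenceName("35f784d9e6a74f55a6780bf06c220784", parent));
            // Matches ".../784580.../info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615"
            System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                    "be68c05c7c7248c69d89116c8616a615"));
        }
    }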
2024-12-09T03:27:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741847_1023 (size=71) 2024-12-09T03:27:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741847_1023 (size=71) 2024-12-09T03:27:48,154 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741848_1024 (size=71) 2024-12-09T03:27:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741848_1024 (size=71) 2024-12-09T03:27:48,166 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:48,175 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-12-09T03:27:48,177 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-12-09T03:27:48,179 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733714868179"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733714868179"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733714868179"}]},"ts":"1733714868179"} 2024-12-09T03:27:48,180 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733714868179"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714868179"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733714868179"}]},"ts":"1733714868179"} 2024-12-09T03:27:48,180 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733714868179"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733714868179"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733714868179"}]},"ts":"1733714868179"} 2024-12-09T03:27:48,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e97476aa668f8fba455fa1666a01664d, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=784580dedcd7bf1485608e5fe5f1f043, ASSIGN}] 2024-12-09T03:27:48,198 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e97476aa668f8fba455fa1666a01664d, ASSIGN 2024-12-09T03:27:48,198 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=784580dedcd7bf1485608e5fe5f1f043, ASSIGN 2024-12-09T03:27:48,199 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=784580dedcd7bf1485608e5fe5f1f043, ASSIGN; state=SPLITTING_NEW, location=1617b0b1421f,44043,1733714833682; forceNewPlan=false, retain=false 2024-12-09T03:27:48,199 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e97476aa668f8fba455fa1666a01664d, ASSIGN; state=SPLITTING_NEW, location=1617b0b1421f,44043,1733714833682; forceNewPlan=false, retain=false 2024-12-09T03:27:48,350 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=784580dedcd7bf1485608e5fe5f1f043, regionState=OPENING, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:48,350 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e97476aa668f8fba455fa1666a01664d, regionState=OPENING, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:48,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e97476aa668f8fba455fa1666a01664d, ASSIGN because future has completed 2024-12-09T03:27:48,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e97476aa668f8fba455fa1666a01664d, server=1617b0b1421f,44043,1733714833682}] 2024-12-09T03:27:48,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=784580dedcd7bf1485608e5fe5f1f043, ASSIGN because future has completed 2024-12-09T03:27:48,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 784580dedcd7bf1485608e5fe5f1f043, server=1617b0b1421f,44043,1733714833682}] 2024-12-09T03:27:48,509 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 
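The assignment steps above follow the usual parent/child procedure pattern: the split procedure (pid=7) stays pending until both daughter ASSIGN procedures (pid=10 and pid=11) report success, then finishes itself. A toy model of that wait using CompletableFuture, assuming nothing about the real ProcedureV2 framework.

    import java.util.concurrent.CompletableFuture;

    // Illustrative only: a parent task that completes after both child tasks do,
    // mirroring "Finished subprocedure pid=..., resume processing ppid=7".
    public class ProcedureTreeSketch {
        static CompletableFuture<String> assign(String region) {
            // In the real system this passes through OPENING in hbase:meta and an
            // OpenRegionProcedure dispatched to the region server.
            return CompletableFuture.supplyAsync(() -> region + " OPEN");
        }

        public static void main(String[] args) {
            CompletableFuture<String> daughterA = assign("e97476aa668f8fba455fa1666a01664d");
            CompletableFuture<String> daughterB = assign("784580dedcd7bf1485608e5fe5f1f043");

            CompletableFuture.allOf(daughterA, daughterB).join();
            System.out.println("split procedure finished: " + daughterA.join() + ", " + daughterB.join());
        }
    }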
2024-12-09T03:27:48,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e97476aa668f8fba455fa1666a01664d, NAME => 'TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T03:27:48,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:48,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,509 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,511 INFO [StoreOpener-e97476aa668f8fba455fa1666a01664d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,511 INFO [StoreOpener-e97476aa668f8fba455fa1666a01664d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e97476aa668f8fba455fa1666a01664d columnFamilyName info 2024-12-09T03:27:48,511 DEBUG [StoreOpener-e97476aa668f8fba455fa1666a01664d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:48,523 DEBUG [StoreOpener-e97476aa668f8fba455fa1666a01664d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-bottom 2024-12-09T03:27:48,524 INFO [StoreOpener-e97476aa668f8fba455fa1666a01664d-1 {}] regionserver.HStore(327): Store=e97476aa668f8fba455fa1666a01664d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:48,524 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,525 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,526 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,527 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,527 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,529 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,530 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e97476aa668f8fba455fa1666a01664d; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697149, jitterRate=-0.11352992057800293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:27:48,530 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:27:48,531 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e97476aa668f8fba455fa1666a01664d: Running coprocessor pre-open hook at 1733714868509Writing region info on filesystem at 1733714868509Initializing all the Stores at 1733714868510 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714868510Cleaning up temporary data from old regions at 1733714868527 (+17 ms)Running coprocessor post-open hooks at 1733714868530 (+3 ms)Region opened successfully at 1733714868531 (+1 ms) 2024-12-09T03:27:48,532 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d., pid=12, masterSystemTime=1733714868505 2024-12-09T03:27:48,532 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
e97476aa668f8fba455fa1666a01664d:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:27:48,532 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:48,532 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T03:27:48,533 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:27:48,534 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): e97476aa668f8fba455fa1666a01664d/info is initiating minor compaction (all files) 2024-12-09T03:27:48,534 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e97476aa668f8fba455fa1666a01664d/info in TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:27:48,534 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-bottom] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/.tmp, totalSize=71.5 K 2024-12-09T03:27:48,534 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:27:48,534 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:27:48,535 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 
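Two priorities appear in the compaction messages above: the open-region request is queued at -2147483648 (Integer.MIN_VALUE), while the value kept for the recently split daughter is -2147482648, which is exactly Integer.MIN_VALUE + 1000. The offset of 1000 is read off these two numbers rather than taken from documentation; a one-line check:

    // Illustrative arithmetic check of the two priority values in the log.
    public class CompactionPrioritySketch {
        public static void main(String[] args) {
            int openRegionPriority = Integer.MIN_VALUE;             // -2147483648 in the log
            int splitDaughterPriority = Integer.MIN_VALUE + 1000;   // -2147482648 in the log
            System.out.println(openRegionPriority + " -> " + splitDaughterPriority);
        }
    }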
2024-12-09T03:27:48,535 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 784580dedcd7bf1485608e5fe5f1f043, NAME => 'TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T03:27:48,535 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733714845502 2024-12-09T03:27:48,535 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,535 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:27:48,535 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,535 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,535 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e97476aa668f8fba455fa1666a01664d, regionState=OPEN, openSeqNum=89, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:48,536 INFO [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,537 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-09T03:27:48,537 INFO [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 784580dedcd7bf1485608e5fe5f1f043 columnFamilyName info 2024-12-09T03:27:48,537 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
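The meta flush requested above is driven by a policy that flushes only the column families above a size lower bound and, "since none of the CFs were above the size", falls back to flushing all of them. A simplified sketch of that decision; the family names, sizes, and threshold below are stand-ins, and the real policy derives its lower bound differently (the open messages show flushSizeLowerBound=-1, i.e. unset).

    import java.util.*;

    // Illustrative only: pick the stores to flush, or all of them if none is large enough.
    public class FlushPolicySketch {
        static Set<String> chooseStoresToFlush(Map<String, Long> memstoreSizes, long flushSizeLowerBound) {
            Set<String> selected = new HashSet<>();
            for (Map.Entry<String, Long> e : memstoreSizes.entrySet()) {
                if (e.getValue() >= flushSizeLowerBound) {
                    selected.add(e.getKey());
                }
            }
            // "Since none of the CFs were above the size, flushing all."
            return selected.isEmpty() ? memstoreSizes.keySet() : selected;
        }

        public static void main(String[] args) {
            Map<String, Long> sizes = Map.of("info", 4_900L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
            System.out.println(chooseStoresToFlush(sizes, 16 * 1024 * 1024));
        }
    }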
2024-12-09T03:27:48,538 DEBUG [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:27:48,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-12-09T03:27:48,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e97476aa668f8fba455fa1666a01664d, server=1617b0b1421f,44043,1733714833682 because future has completed 2024-12-09T03:27:48,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-09T03:27:48,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e97476aa668f8fba455fa1666a01664d, server=1617b0b1421f,44043,1733714833682 in 187 msec 2024-12-09T03:27:48,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e97476aa668f8fba455fa1666a01664d, ASSIGN in 346 msec 2024-12-09T03:27:48,556 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e97476aa668f8fba455fa1666a01664d#info#compaction#62 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:48,556 DEBUG [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-top 2024-12-09T03:27:48,557 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/.tmp/info/eb0465ab431f493a81de8473f5ad6432 is 1080, key is row0001/info:/1733714845502/Put/seqid=0 2024-12-09T03:27:48,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/083c93d1e59b4e8b97b5bee0b99dd29a is 193, key is TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043./info:regioninfo/1733714868350/Put/seqid=0 2024-12-09T03:27:48,565 DEBUG [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615 2024-12-09T03:27:48,565 INFO [StoreOpener-784580dedcd7bf1485608e5fe5f1f043-1 {}] regionserver.HStore(327): 
Store=784580dedcd7bf1485608e5fe5f1f043/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:27:48,565 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741850_1026 (size=70862) 2024-12-09T03:27:48,566 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741850_1026 (size=70862) 2024-12-09T03:27:48,568 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,568 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,568 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,570 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,571 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 784580dedcd7bf1485608e5fe5f1f043; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702159, jitterRate=-0.10715986788272858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T03:27:48,571 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:48,572 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 784580dedcd7bf1485608e5fe5f1f043: Running coprocessor pre-open hook at 1733714868535Writing region info on filesystem at 1733714868535Initializing all the Stores at 1733714868536 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714868536Cleaning up temporary data from old regions at 1733714868568 (+32 ms)Running coprocessor post-open hooks at 1733714868571 (+3 ms)Region opened successfully at 1733714868571 
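The two region-open messages report desiredMaxFileSize=697149 (jitterRate about -0.1135) and desiredMaxFileSize=702159 (jitterRate about -0.1072). Both values are consistent with a single base size of 786,432 bytes (768 KB) with the jitter applied multiplicatively, desired = base * (1 + jitterRate); the base is inferred from the arithmetic, not stated anywhere in the log.

    // Illustrative arithmetic check of the jittered split sizes seen above.
    public class SplitSizeJitterSketch {
        public static void main(String[] args) {
            long base = 786_432L; // inferred, not logged
            double[] jitterRates = { -0.11352992057800293, -0.10715986788272858 };
            for (double j : jitterRates) {
                System.out.printf("base=%d jitter=%.6f -> desiredMaxFileSize=%d%n",
                        base, j, Math.round(base * (1 + j)));
            }
            // Prints ~697149 and ~702159, matching the two open messages.
        }
    }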
2024-12-09T03:27:48,572 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., pid=13, masterSystemTime=1733714868505 2024-12-09T03:27:48,573 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T03:27:48,573 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:48,573 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-09T03:27:48,574 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:27:48,574 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:27:48,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741849_1025 (size=9847) 2024-12-09T03:27:48,574 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:27:48,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741849_1025 (size=9847) 2024-12-09T03:27:48,574 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-top, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=77.4 K 2024-12-09T03:27:48,575 DEBUG [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 
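The long-compaction entries around here merge daughter B's two inputs (the "-top" half of the parent file and the linked flush file) into a single new store file, named 152ae95d... a few entries below. A toy two-way merge over in-memory key lists, standing in for the real Compactor; the row keys are stand-ins apart from row0062 and row0064, which do appear in the log.

    import java.util.*;

    // Illustrative only: merge two sorted key streams into one sorted output,
    // which is the core of a minor compaction.
    public class CompactionMergeSketch {
        public static void main(String[] args) {
            List<String> topHalfOfParentFile = List.of("row0062", "row0070", "row0093");
            List<String> flushedFile = List.of("row0064");

            List<String> compacted = new ArrayList<>();
            int i = 0, j = 0;
            while (i < topHalfOfParentFile.size() || j < flushedFile.size()) {
                if (j >= flushedFile.size()
                    || (i < topHalfOfParentFile.size()
                        && topHalfOfParentFile.get(i).compareTo(flushedFile.get(j)) <= 0)) {
                    compacted.add(topHalfOfParentFile.get(i++));
                } else {
                    compacted.add(flushedFile.get(j++));
                }
            }
            System.out.println(compacted); // one sorted output file, like 152ae95d... below
        }
    }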
2024-12-09T03:27:48,575 INFO [RS_OPEN_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:27:48,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/083c93d1e59b4e8b97b5bee0b99dd29a 2024-12-09T03:27:48,576 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] compactions.Compactor(225): Compacting 35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733714845502 2024-12-09T03:27:48,576 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=784580dedcd7bf1485608e5fe5f1f043, regionState=OPEN, openSeqNum=89, regionLocation=1617b0b1421f,44043,1733714833682 2024-12-09T03:27:48,577 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733714867786 2024-12-09T03:27:48,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 784580dedcd7bf1485608e5fe5f1f043, server=1617b0b1421f,44043,1733714833682 because future has completed 2024-12-09T03:27:48,582 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/.tmp/info/eb0465ab431f493a81de8473f5ad6432 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/eb0465ab431f493a81de8473f5ad6432 2024-12-09T03:27:48,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-09T03:27:48,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 784580dedcd7bf1485608e5fe5f1f043, server=1617b0b1421f,44043,1733714833682 in 233 msec 2024-12-09T03:27:48,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-09T03:27:48,593 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=784580dedcd7bf1485608e5fe5f1f043, ASSIGN in 393 msec 2024-12-09T03:27:48,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d4e2ff6e64d18ee11bc1992b0fcdca97, daughterA=e97476aa668f8fba455fa1666a01664d, daughterB=784580dedcd7bf1485608e5fe5f1f043 in 727 msec 2024-12-09T03:27:48,599 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e97476aa668f8fba455fa1666a01664d/info of 
e97476aa668f8fba455fa1666a01664d into eb0465ab431f493a81de8473f5ad6432(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:48,599 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e97476aa668f8fba455fa1666a01664d: 2024-12-09T03:27:48,599 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d., storeName=e97476aa668f8fba455fa1666a01664d/info, priority=15, startTime=1733714868532; duration=0sec 2024-12-09T03:27:48,600 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:48,600 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e97476aa668f8fba455fa1666a01664d:info 2024-12-09T03:27:48,604 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#64 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:48,604 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/152ae95d694d45f4aa3d3c5d1cc54255 is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:27:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741851_1027 (size=8359) 2024-12-09T03:27:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741851_1027 (size=8359) 2024-12-09T03:27:48,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/ns/d99830e983734a10814128c1eae67ebe is 43, key is default/ns:d/1733714835330/Put/seqid=0 2024-12-09T03:27:48,615 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/152ae95d694d45f4aa3d3c5d1cc54255 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/152ae95d694d45f4aa3d3c5d1cc54255 2024-12-09T03:27:48,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741852_1028 (size=5153) 2024-12-09T03:27:48,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741852_1028 (size=5153) 2024-12-09T03:27:48,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/ns/d99830e983734a10814128c1eae67ebe 2024-12-09T03:27:48,621 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 152ae95d694d45f4aa3d3c5d1cc54255(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:48,621 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:27:48,621 INFO [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=14, startTime=1733714868572; duration=0sec 2024-12-09T03:27:48,621 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:48,621 DEBUG [RS:0;1617b0b1421f:44043-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:27:48,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/table/c40cd6f41a5f4d4db5bce3896405bc76 is 65, key is TestLogRolling-testLogRolling/table:state/1733714835816/Put/seqid=0 2024-12-09T03:27:48,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741853_1029 (size=5340) 2024-12-09T03:27:48,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741853_1029 (size=5340) 2024-12-09T03:27:48,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/table/c40cd6f41a5f4d4db5bce3896405bc76 2024-12-09T03:27:48,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/083c93d1e59b4e8b97b5bee0b99dd29a as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/info/083c93d1e59b4e8b97b5bee0b99dd29a 2024-12-09T03:27:48,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/info/083c93d1e59b4e8b97b5bee0b99dd29a, entries=30, sequenceid=17, filesize=9.6 K 2024-12-09T03:27:48,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/ns/d99830e983734a10814128c1eae67ebe as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/ns/d99830e983734a10814128c1eae67ebe 
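The commit steps above move each flushed file from the region's .tmp directory into its column-family directory, so readers never observe a half-written store file. A minimal local-filesystem sketch of that write-to-tmp-then-rename pattern; java.nio is used here purely for illustration, while the real rename happens on HDFS.

    import java.io.IOException;
    import java.nio.file.*;

    // Illustrative only: commit a flushed file by moving it out of ".tmp".
    public class FlushCommitSketch {
        static Path commit(Path regionDir, String family, String fileName) throws IOException {
            Path tmpFile = regionDir.resolve(".tmp").resolve(fileName);
            Path familyDir = regionDir.resolve(family);
            Files.createDirectories(familyDir);
            // Mirrors "Committing .../.tmp/<file> as .../<family>/<file>" above.
            return Files.move(tmpFile, familyDir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path regionDir = Files.createTempDirectory("region");
            Files.createDirectories(regionDir.resolve(".tmp"));
            Files.createFile(regionDir.resolve(".tmp").resolve("083c93d1e59b4e8b97b5bee0b99dd29a"));
            System.out.println(commit(regionDir, "info", "083c93d1e59b4e8b97b5bee0b99dd29a"));
        }
    }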
2024-12-09T03:27:48,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/ns/d99830e983734a10814128c1eae67ebe, entries=2, sequenceid=17, filesize=5.0 K 2024-12-09T03:27:48,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/table/c40cd6f41a5f4d4db5bce3896405bc76 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/table/c40cd6f41a5f4d4db5bce3896405bc76 2024-12-09T03:27:48,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/table/c40cd6f41a5f4d4db5bce3896405bc76, entries=2, sequenceid=17, filesize=5.2 K 2024-12-09T03:27:48,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 126ms, sequenceid=17, compaction requested=false 2024-12-09T03:27:48,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T03:27:48,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:48,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:49,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:49,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:49000 deadline: 1733714879789, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. is not online on 1617b0b1421f,44043,1733714833682 2024-12-09T03:27:49,790 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. is not online on 1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T03:27:49,790 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97. 
is not online on 1617b0b1421f,44043,1733714833682 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T03:27:49,790 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733714835450.d4e2ff6e64d18ee11bc1992b0fcdca97., hostname=1617b0b1421f,44043,1733714833682, seqNum=2 from cache 2024-12-09T03:27:50,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:50,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:51,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:51,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:52,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:52,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:53,085 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,628 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:27:53,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:27:53,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:53,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:54,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:54,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:55,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:55,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:55,956 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T03:27:55,956 INFO [master/1617b0b1421f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T03:27:56,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:56,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:57,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:57,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:58,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:58,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:59,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:27:59,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:27:59,840 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., hostname=1617b0b1421f,44043,1733714833682, seqNum=89] 2024-12-09T03:27:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:59,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:27:59,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/133f3b18373d426f99ceb69ba719b2bc is 1080, key is row0065/info:/1733714879841/Put/seqid=0 2024-12-09T03:27:59,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741854_1030 (size=12509) 2024-12-09T03:27:59,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741854_1030 (size=12509) 2024-12-09T03:27:59,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/133f3b18373d426f99ceb69ba719b2bc 2024-12-09T03:27:59,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/133f3b18373d426f99ceb69ba719b2bc as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc 2024-12-09T03:27:59,883 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc, entries=7, sequenceid=99, filesize=12.2 K 2024-12-09T03:27:59,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 784580dedcd7bf1485608e5fe5f1f043 in 30ms, sequenceid=99, compaction requested=false 2024-12-09T03:27:59,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:27:59,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:27:59,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-09T03:27:59,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8dbe1819dca648568bb7fe8260ac7c98 is 1080, key is row0072/info:/1733714879856/Put/seqid=0 2024-12-09T03:27:59,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741855_1031 (size=20064) 2024-12-09T03:27:59,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741855_1031 (size=20064) 2024-12-09T03:27:59,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8dbe1819dca648568bb7fe8260ac7c98 2024-12-09T03:27:59,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8dbe1819dca648568bb7fe8260ac7c98 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98 2024-12-09T03:27:59,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98, entries=14, sequenceid=116, filesize=19.6 K 2024-12-09T03:27:59,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 784580dedcd7bf1485608e5fe5f1f043 in 55ms, sequenceid=116, compaction requested=true 2024-12-09T03:27:59,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:27:59,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:27:59,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:59,941 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:27:59,942 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40932 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:27:59,943 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:27:59,943 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in 
TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:27:59,943 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/152ae95d694d45f4aa3d3c5d1cc54255, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=40.0 K 2024-12-09T03:27:59,943 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 152ae95d694d45f4aa3d3c5d1cc54255, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733714857719 2024-12-09T03:27:59,944 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 133f3b18373d426f99ceb69ba719b2bc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733714879841 2024-12-09T03:27:59,944 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8dbe1819dca648568bb7fe8260ac7c98, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733714879856 2024-12-09T03:27:59,954 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#69 average throughput is 24.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:27:59,955 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/33bc327f958a43b08f7909cbcd2c96ec is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:27:59,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741856_1032 (size=31106) 2024-12-09T03:27:59,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741856_1032 (size=31106) 2024-12-09T03:27:59,985 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/33bc327f958a43b08f7909cbcd2c96ec as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/33bc327f958a43b08f7909cbcd2c96ec 2024-12-09T03:27:59,991 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 33bc327f958a43b08f7909cbcd2c96ec(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:27:59,991 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:27:59,992 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714879941; duration=0sec 2024-12-09T03:27:59,992 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:27:59,992 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:00,183 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-12-09T03:28:00,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:00,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:01,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:01,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:01,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T03:28:01,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/1f38fba6680942b496096b78387979ae is 1080, key is row0086/info:/1733714879887/Put/seqid=0 2024-12-09T03:28:01,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741857_1033 (size=17896) 2024-12-09T03:28:01,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741857_1033 (size=17896) 2024-12-09T03:28:01,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/1f38fba6680942b496096b78387979ae 2024-12-09T03:28:01,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/1f38fba6680942b496096b78387979ae as 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae 2024-12-09T03:28:01,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae, entries=12, sequenceid=132, filesize=17.5 K 2024-12-09T03:28:01,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 21ms, sequenceid=132, compaction requested=false 2024-12-09T03:28:01,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:01,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T03:28:01,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c41d601412d44f61b64e599aba10c768 is 1080, key is row0098/info:/1733714881930/Put/seqid=0 2024-12-09T03:28:01,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741858_1034 (size=17906) 2024-12-09T03:28:01,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741858_1034 (size=17906) 2024-12-09T03:28:01,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=147 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c41d601412d44f61b64e599aba10c768 2024-12-09T03:28:01,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c41d601412d44f61b64e599aba10c768 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768 2024-12-09T03:28:01,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768, entries=12, sequenceid=147, filesize=17.5 K 2024-12-09T03:28:01,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 784580dedcd7bf1485608e5fe5f1f043 in 39ms, sequenceid=147, compaction requested=true 2024-12-09T03:28:01,991 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:01,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:01,991 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:01,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:01,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T03:28:01,993 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 66908 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:01,993 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:01,993 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:01,993 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/33bc327f958a43b08f7909cbcd2c96ec, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=65.3 K 2024-12-09T03:28:01,993 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33bc327f958a43b08f7909cbcd2c96ec, keycount=24, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733714857719 2024-12-09T03:28:01,994 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1f38fba6680942b496096b78387979ae, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733714879887 2024-12-09T03:28:01,994 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting c41d601412d44f61b64e599aba10c768, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733714881930 2024-12-09T03:28:01,995 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/7e41bf462d09486cbb1c86d9b58bf809 is 1080, key is row0110/info:/1733714881954/Put/seqid=0 2024-12-09T03:28:01,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741859_1035 (size=24394) 2024-12-09T03:28:02,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741859_1035 (size=24394) 2024-12-09T03:28:02,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/7e41bf462d09486cbb1c86d9b58bf809 2024-12-09T03:28:02,004 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#73 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:02,004 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/5fed1de5128541ccb78b56228237682f is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:02,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/7e41bf462d09486cbb1c86d9b58bf809 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809 2024-12-09T03:28:02,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741860_1036 (size=57110) 2024-12-09T03:28:02,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741860_1036 (size=57110) 2024-12-09T03:28:02,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809, entries=18, sequenceid=168, filesize=23.8 K 2024-12-09T03:28:02,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 784580dedcd7bf1485608e5fe5f1f043 in 23ms, sequenceid=168, compaction requested=false 2024-12-09T03:28:02,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:02,016 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/5fed1de5128541ccb78b56228237682f as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/5fed1de5128541ccb78b56228237682f 2024-12-09T03:28:02,021 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 5fed1de5128541ccb78b56228237682f(size=55.8 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:28:02,021 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:02,021 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714881991; duration=0sec 2024-12-09T03:28:02,022 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:02,022 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:02,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:02,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:03,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:03,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:04,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T03:28:04,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2847c181e92a4efaa00c44b75e73d568 is 1080, key is row0128/info:/1733714881993/Put/seqid=0 2024-12-09T03:28:04,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741861_1037 (size=12516) 2024-12-09T03:28:04,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741861_1037 (size=12516) 2024-12-09T03:28:04,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2847c181e92a4efaa00c44b75e73d568 2024-12-09T03:28:04,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2847c181e92a4efaa00c44b75e73d568 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568 2024-12-09T03:28:04,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568, entries=7, sequenceid=179, filesize=12.2 K 2024-12-09T03:28:04,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 784580dedcd7bf1485608e5fe5f1f043 in 35ms, sequenceid=179, compaction requested=true 2024-12-09T03:28:04,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:04,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:04,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:04,044 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:04,045 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94020 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:04,046 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:04,046 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:04,046 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/5fed1de5128541ccb78b56228237682f, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=91.8 K 2024-12-09T03:28:04,046 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5fed1de5128541ccb78b56228237682f, keycount=48, bloomtype=ROW, size=55.8 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733714857719 2024-12-09T03:28:04,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:04,046 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e41bf462d09486cbb1c86d9b58bf809, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733714881954 2024-12-09T03:28:04,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T03:28:04,047 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2847c181e92a4efaa00c44b75e73d568, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733714881993 2024-12-09T03:28:04,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a6cbf4e56f84493aa2d7b78daab9e5a9 is 1080, key is row0135/info:/1733714884010/Put/seqid=0 2024-12-09T03:28:04,065 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
784580dedcd7bf1485608e5fe5f1f043#info#compaction#76 average throughput is 24.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:04,066 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/f0153a54bef54a0ebf84240ee486e4c6 is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:04,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741862_1038 (size=24394) 2024-12-09T03:28:04,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741862_1038 (size=24394) 2024-12-09T03:28:04,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a6cbf4e56f84493aa2d7b78daab9e5a9 2024-12-09T03:28:04,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a6cbf4e56f84493aa2d7b78daab9e5a9 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9 2024-12-09T03:28:04,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741863_1039 (size=84299) 2024-12-09T03:28:04,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9, entries=18, sequenceid=200, filesize=23.8 K 2024-12-09T03:28:04,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=8.41 KB/8608 for 784580dedcd7bf1485608e5fe5f1f043 in 37ms, sequenceid=200, compaction requested=false 2024-12-09T03:28:04,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741863_1039 (size=84299) 2024-12-09T03:28:04,091 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/f0153a54bef54a0ebf84240ee486e4c6 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/f0153a54bef54a0ebf84240ee486e4c6 2024-12-09T03:28:04,098 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): 
Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into f0153a54bef54a0ebf84240ee486e4c6(size=82.3 K), total size for store is 106.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:28:04,098 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:04,098 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714884044; duration=0sec 2024-12-09T03:28:04,098 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:04,098 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:04,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:04,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:05,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:05,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:06,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-09T03:28:06,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/ec6c0fb96be24681ad5c187efb9c3a2d is 1080, key is row0153/info:/1733714884048/Put/seqid=0 2024-12-09T03:28:06,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741864_1040 (size=14672) 2024-12-09T03:28:06,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741864_1040 (size=14672) 2024-12-09T03:28:06,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/ec6c0fb96be24681ad5c187efb9c3a2d 2024-12-09T03:28:06,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/ec6c0fb96be24681ad5c187efb9c3a2d as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d 2024-12-09T03:28:06,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d, entries=9, sequenceid=213, filesize=14.3 K 2024-12-09T03:28:06,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 23ms, sequenceid=213, compaction requested=true 2024-12-09T03:28:06,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:06,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:06,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:06,092 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:06,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:06,092 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:06,093 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:06,094 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:06,094 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:06,094 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/f0153a54bef54a0ebf84240ee486e4c6, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=120.5 K 2024-12-09T03:28:06,094 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting f0153a54bef54a0ebf84240ee486e4c6, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1733714857719 2024-12-09T03:28:06,094 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6cbf4e56f84493aa2d7b78daab9e5a9, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733714884010 2024-12-09T03:28:06,095 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting ec6c0fb96be24681ad5c187efb9c3a2d, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733714884048 2024-12-09T03:28:06,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3254a7700dad4952a513bb7a3d082af2 is 1080, key is row0162/info:/1733714886071/Put/seqid=0 2024-12-09T03:28:06,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741865_1041 (size=16828) 2024-12-09T03:28:06,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741865_1041 (size=16828) 2024-12-09T03:28:06,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=227 (bloomFilter=true), 
to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3254a7700dad4952a513bb7a3d082af2 2024-12-09T03:28:06,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3254a7700dad4952a513bb7a3d082af2 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2 2024-12-09T03:28:06,113 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#79 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:06,114 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2afa38ee869c4366af8aa354fe1543b1 is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:06,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2, entries=11, sequenceid=227, filesize=16.4 K 2024-12-09T03:28:06,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 784580dedcd7bf1485608e5fe5f1f043 in 27ms, sequenceid=227, compaction requested=false 2024-12-09T03:28:06,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:06,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741866_1042 (size=113515) 2024-12-09T03:28:06,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741866_1042 (size=113515) 2024-12-09T03:28:06,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:06,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T03:28:06,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/e2750dd033554a2ba57ee0cf9e73d7b3 is 1080, key is row0173/info:/1733714886094/Put/seqid=0 2024-12-09T03:28:06,127 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2afa38ee869c4366af8aa354fe1543b1 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2afa38ee869c4366af8aa354fe1543b1 2024-12-09T03:28:06,133 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 2afa38ee869c4366af8aa354fe1543b1(size=110.9 K), total size for store is 127.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:28:06,133 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:06,133 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714886092; duration=0sec 2024-12-09T03:28:06,134 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:06,134 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741867_1043 (size=15750) 2024-12-09T03:28:06,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741867_1043 (size=15750) 2024-12-09T03:28:06,147 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/e2750dd033554a2ba57ee0cf9e73d7b3 2024-12-09T03:28:06,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/e2750dd033554a2ba57ee0cf9e73d7b3 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3 2024-12-09T03:28:06,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3, entries=10, sequenceid=240, filesize=15.4 K 2024-12-09T03:28:06,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 36ms, sequenceid=240, compaction requested=true 2024-12-09T03:28:06,158 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:06,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:06,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:06,158 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:06,159 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 146093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:06,159 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:06,159 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:06,159 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2afa38ee869c4366af8aa354fe1543b1, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=142.7 K 2024-12-09T03:28:06,160 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2afa38ee869c4366af8aa354fe1543b1, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733714857719 2024-12-09T03:28:06,160 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3254a7700dad4952a513bb7a3d082af2, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733714886071 2024-12-09T03:28:06,160 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2750dd033554a2ba57ee0cf9e73d7b3, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733714886094 2024-12-09T03:28:06,171 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#81 average throughput is 62.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:06,172 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/1e6a334245b54730bd7e539119946e35 is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:06,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741868_1044 (size=136387) 2024-12-09T03:28:06,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741868_1044 (size=136387) 2024-12-09T03:28:06,184 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/1e6a334245b54730bd7e539119946e35 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1e6a334245b54730bd7e539119946e35 2024-12-09T03:28:06,190 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 1e6a334245b54730bd7e539119946e35(size=133.2 K), total size for store is 133.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:28:06,190 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:06,190 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714886158; duration=0sec 2024-12-09T03:28:06,190 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:06,190 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:06,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:06,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:07,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:07,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:08,062 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=66, reuseRatio=88.00% 2024-12-09T03:28:08,063 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T03:28:08,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:08,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:08,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/b6967f361ccf4e18a31e6b7bb88cd0fd is 1080, key is row0183/info:/1733714886124/Put/seqid=0 2024-12-09T03:28:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741869_1045 (size=16828) 2024-12-09T03:28:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741869_1045 (size=16828) 2024-12-09T03:28:08,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=256 (bloomFilter=true), 
to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/b6967f361ccf4e18a31e6b7bb88cd0fd 2024-12-09T03:28:08,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/b6967f361ccf4e18a31e6b7bb88cd0fd as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd 2024-12-09T03:28:08,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd, entries=11, sequenceid=256, filesize=16.4 K 2024-12-09T03:28:08,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 22ms, sequenceid=256, compaction requested=false 2024-12-09T03:28:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:08,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:08,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:08,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8eb04007c0b5443e944eaf8df42d9a91 is 1080, key is row0194/info:/1733714888146/Put/seqid=0 2024-12-09T03:28:08,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741870_1046 (size=16839) 2024-12-09T03:28:08,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741870_1046 (size=16839) 2024-12-09T03:28:08,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8eb04007c0b5443e944eaf8df42d9a91 2024-12-09T03:28:08,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/8eb04007c0b5443e944eaf8df42d9a91 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91 2024-12-09T03:28:08,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91, entries=11, sequenceid=270, filesize=16.4 K 2024-12-09T03:28:08,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 21ms, sequenceid=270, compaction requested=true 2024-12-09T03:28:08,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:08,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:08,189 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:08,189 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:08,190 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 170054 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:08,190 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:08,190 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 
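Each of the minor compactions recorded above is selected the same way: ExploringCompactionPolicy walks the eligible store files and keeps a candidate set whose file sizes are "in ratio" with one another. The listing below is a simplified, self-contained sketch of that selection test, not the HBase source; the class and method names are illustrative, and it assumes the default ratio of 1.2 (hbase.hstore.compaction.ratio) plus the default minimum-compact-size behaviour under which candidate sets smaller than that threshold skip the ratio test, which is why the 91.8 K, three-file selection at 03:28:04 above is accepted whole.

import java.util.List;

public class RatioCheckSketch {

  // A candidate set is "in ratio" when no single file dwarfs the others:
  // every file must be <= ratio * (sum of the remaining files in the set).
  // Sets whose total size is below minCompactSize skip the test entirely.
  static boolean inRatio(List<Long> sizes, double ratio, long minCompactSize) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    if (total < minCompactSize) {
      return true; // small candidate sets are accepted without the ratio test
    }
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false; // one file is too large relative to the rest
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Store-file sizes (bytes) from the 03:28:04 selection above: 55.8 K + 23.8 K + 12.2 K = 94,020.
    List<Long> candidate = List.of(57_110L, 24_394L, 12_516L);
    long minCompactSize = 128L * 1024 * 1024; // assumed default: the 128 MB memstore flush size
    // Prints true, matching the "1 in ratio" reported by ExploringCompactionPolicy(116).
    System.out.println(inRatio(candidate, 1.2, minCompactSize));
  }
}

(Sketch only; the real policy also enumerates start/end permutations of the eligible files, caps a set at hbase.hstore.compaction.max files, and prefers the candidate that compacts the most files for the least total size.)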
2024-12-09T03:28:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:08,190 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1e6a334245b54730bd7e539119946e35, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=166.1 K 2024-12-09T03:28:08,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:08,191 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e6a334245b54730bd7e539119946e35, keycount=121, bloomtype=ROW, size=133.2 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733714857719 2024-12-09T03:28:08,191 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting b6967f361ccf4e18a31e6b7bb88cd0fd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733714886124 2024-12-09T03:28:08,191 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8eb04007c0b5443e944eaf8df42d9a91, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733714888146 2024-12-09T03:28:08,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a5b040ee43df43c19d09f202a4104c23 is 1080, key is row0205/info:/1733714888168/Put/seqid=0 2024-12-09T03:28:08,210 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#85 average throughput is 73.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:08,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741871_1047 (size=16839) 2024-12-09T03:28:08,210 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/787c9a11493e4dc9bef1259c0525e697 is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:08,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741871_1047 (size=16839) 2024-12-09T03:28:08,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a5b040ee43df43c19d09f202a4104c23 2024-12-09T03:28:08,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741872_1048 (size=160293) 2024-12-09T03:28:08,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741872_1048 (size=160293) 2024-12-09T03:28:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/a5b040ee43df43c19d09f202a4104c23 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23 2024-12-09T03:28:08,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23, entries=11, sequenceid=284, filesize=16.4 K 2024-12-09T03:28:08,223 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/787c9a11493e4dc9bef1259c0525e697 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/787c9a11493e4dc9bef1259c0525e697 2024-12-09T03:28:08,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 784580dedcd7bf1485608e5fe5f1f043 in 34ms, sequenceid=284, compaction requested=false 2024-12-09T03:28:08,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:08,232 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 
787c9a11493e4dc9bef1259c0525e697(size=156.5 K), total size for store is 173.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T03:28:08,232 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:08,232 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714888189; duration=0sec 2024-12-09T03:28:08,232 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:08,233 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:08,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:08,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:09,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:09,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:10,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T03:28:10,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/9db973d7636245349bfdd2d172b4c0b4 is 1080, key is row0216/info:/1733714888191/Put/seqid=0 2024-12-09T03:28:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741873_1049 (size=15760) 2024-12-09T03:28:10,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741873_1049 (size=15760) 2024-12-09T03:28:10,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/9db973d7636245349bfdd2d172b4c0b4 2024-12-09T03:28:10,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/9db973d7636245349bfdd2d172b4c0b4 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4 2024-12-09T03:28:10,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4, entries=10, sequenceid=298, filesize=15.4 K 2024-12-09T03:28:10,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 25ms, sequenceid=298, compaction requested=true 2024-12-09T03:28:10,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:10,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:10,240 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:10,240 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:10,241 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192892 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:10,241 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:10,241 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:10,241 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/787c9a11493e4dc9bef1259c0525e697, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=188.4 K 2024-12-09T03:28:10,242 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 787c9a11493e4dc9bef1259c0525e697, keycount=143, bloomtype=ROW, size=156.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733714857719 2024-12-09T03:28:10,242 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5b040ee43df43c19d09f202a4104c23, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733714888168 2024-12-09T03:28:10,242 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9db973d7636245349bfdd2d172b4c0b4, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733714888191 2024-12-09T03:28:10,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2fbd9cc776214529a7ed77e8c11965e5 is 1080, key is row0226/info:/1733714890218/Put/seqid=0 2024-12-09T03:28:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741874_1050 (size=16839) 2024-12-09T03:28:10,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741874_1050 (size=16839) 2024-12-09T03:28:10,251 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=312 (bloomFilter=true), 
to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2fbd9cc776214529a7ed77e8c11965e5 2024-12-09T03:28:10,258 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#88 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:10,259 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/9c4a029167ca4d779b2c0e2c484afa8b is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:10,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2fbd9cc776214529a7ed77e8c11965e5 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5 2024-12-09T03:28:10,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5, entries=11, sequenceid=312, filesize=16.4 K 2024-12-09T03:28:10,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 784580dedcd7bf1485608e5fe5f1f043 in 31ms, sequenceid=312, compaction requested=false 2024-12-09T03:28:10,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:10,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-09T03:28:10,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3594b66c708f4a059a3be0cdc37e36e0 is 1080, key is row0237/info:/1733714890241/Put/seqid=0 2024-12-09T03:28:10,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741875_1051 (size=183058) 2024-12-09T03:28:10,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741876_1052 (size=14681) 2024-12-09T03:28:10,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741875_1051 (size=183058) 2024-12-09T03:28:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 
is added to blk_1073741876_1052 (size=14681) 2024-12-09T03:28:10,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3594b66c708f4a059a3be0cdc37e36e0 2024-12-09T03:28:10,301 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/9c4a029167ca4d779b2c0e2c484afa8b as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9c4a029167ca4d779b2c0e2c484afa8b 2024-12-09T03:28:10,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/3594b66c708f4a059a3be0cdc37e36e0 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0 2024-12-09T03:28:10,308 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 9c4a029167ca4d779b2c0e2c484afa8b(size=178.8 K), total size for store is 195.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
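The repeated "Failed invocation" warnings from Close-WAL-Writer-0 earlier in this section report java.lang.reflect.InvocationTargetException: null with a Caused by: java.io.IOException: Filesystem closed. That shape is ordinary Java reflection behavior: RecoverLeaseFSUtils.isFileClosed calls the HDFS isFileClosed method reflectively, so the IOException thrown by the already-closed DFS client surfaces wrapped in an InvocationTargetException whose own message is null. A minimal, self-contained sketch of that wrapping follows; it is not HBase code, and the class and method names are illustrative only.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Illustrative only: shows why the log prints "InvocationTargetException: null"
// with the real failure attached as the cause.
public class ReflectiveWrapDemo {
    // Stand-in for DistributedFileSystem.isFileClosed on a closed client.
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        ReflectiveWrapDemo fs = new ReflectiveWrapDemo();
        Method m = ReflectiveWrapDemo.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal");
        } catch (InvocationTargetException e) {
            // The wrapper's own message is null; the underlying IOException is the cause.
            System.out.println("wrapper message: " + e.getMessage()); // null
            System.out.println("cause: " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}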
2024-12-09T03:28:10,309 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,309 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714890240; duration=0sec 2024-12-09T03:28:10,309 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:10,309 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:10,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0, entries=9, sequenceid=324, filesize=14.3 K 2024-12-09T03:28:10,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for 784580dedcd7bf1485608e5fe5f1f043 in 38ms, sequenceid=324, compaction requested=true 2024-12-09T03:28:10,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 784580dedcd7bf1485608e5fe5f1f043:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T03:28:10,313 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T03:28:10,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:10,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44043 {}] regionserver.HRegion(8855): Flush requested on 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:10,315 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 214578 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T03:28:10,315 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1541): 784580dedcd7bf1485608e5fe5f1f043/info is initiating minor compaction (all files) 2024-12-09T03:28:10,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 784580dedcd7bf1485608e5fe5f1f043 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T03:28:10,315 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 784580dedcd7bf1485608e5fe5f1f043/info in TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 
2024-12-09T03:28:10,315 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9c4a029167ca4d779b2c0e2c484afa8b, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0] into tmpdir=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp, totalSize=209.5 K 2024-12-09T03:28:10,316 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c4a029167ca4d779b2c0e2c484afa8b, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733714857719 2024-12-09T03:28:10,317 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2fbd9cc776214529a7ed77e8c11965e5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733714890218 2024-12-09T03:28:10,317 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3594b66c708f4a059a3be0cdc37e36e0, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733714890241 2024-12-09T03:28:10,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c0cee39dd7cb4da7b68402998f6f0e2d is 1080, key is row0246/info:/1733714890276/Put/seqid=0 2024-12-09T03:28:10,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741877_1053 (size=16839) 2024-12-09T03:28:10,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741877_1053 (size=16839) 2024-12-09T03:28:10,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c0cee39dd7cb4da7b68402998f6f0e2d 2024-12-09T03:28:10,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/c0cee39dd7cb4da7b68402998f6f0e2d as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c0cee39dd7cb4da7b68402998f6f0e2d 2024-12-09T03:28:10,345 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 784580dedcd7bf1485608e5fe5f1f043#info#compaction#91 average throughput is 47.20 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T03:28:10,346 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2e6c6a7b44cf4d19baf48c14d971f04f is 1080, key is row0062/info:/1733714857719/Put/seqid=0 2024-12-09T03:28:10,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c0cee39dd7cb4da7b68402998f6f0e2d, entries=11, sequenceid=339, filesize=16.4 K 2024-12-09T03:28:10,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for 784580dedcd7bf1485608e5fe5f1f043 in 38ms, sequenceid=339, compaction requested=false 2024-12-09T03:28:10,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741878_1054 (size=204817) 2024-12-09T03:28:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741878_1054 (size=204817) 2024-12-09T03:28:10,363 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/.tmp/info/2e6c6a7b44cf4d19baf48c14d971f04f as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2e6c6a7b44cf4d19baf48c14d971f04f 2024-12-09T03:28:10,369 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 784580dedcd7bf1485608e5fe5f1f043/info of 784580dedcd7bf1485608e5fe5f1f043 into 2e6c6a7b44cf4d19baf48c14d971f04f(size=200.0 K), total size for store is 216.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
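The PressureAwareThroughputController entries above ("average throughput is 47.20 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second") come from compaction throughput throttling. A simplified sketch of that kind of rate limiter is shown below, assuming a fixed byte-per-second limit rather than HBase's pressure-aware adjustment; the class and method names are hypothetical and this is not the actual HBase controller.

// Simplified sketch: track bytes written and sleep whenever the running rate
// would exceed the configured limit.
public class SimpleThroughputLimiter {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesSoFar = 0;

    public SimpleThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Call after writing a chunk; sleeps if the observed rate is above the limit. */
    public void control(long bytesJustWritten) throws InterruptedException {
        bytesSoFar += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsNeeded = bytesSoFar / limitBytesPerSec;
        if (minSecondsNeeded > elapsedSec) {
            Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
        }
    }
}

For example, new SimpleThroughputLimiter(50 * 1024 * 1024) would cap writes at roughly the 50.00 MB/second limit the log reports; when the compaction stays under the limit, as here, the limiter never sleeps ("slept 0 time(s)").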
2024-12-09T03:28:10,369 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 784580dedcd7bf1485608e5fe5f1f043: 2024-12-09T03:28:10,369 INFO [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., storeName=784580dedcd7bf1485608e5fe5f1f043/info, priority=13, startTime=1733714890313; duration=0sec 2024-12-09T03:28:10,369 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T03:28:10,369 DEBUG [RS:0;1617b0b1421f:44043-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 784580dedcd7bf1485608e5fe5f1f043:info 2024-12-09T03:28:10,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:10,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:11,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:11,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:12,316 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-09T03:28:12,317 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C44043%2C1733714833682.1733714892316 2024-12-09T03:28:12,344 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,344 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682/1617b0b1421f%2C44043%2C1733714833682.1733714834726 with entries=318, filesize=310.28 KB; new WAL /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682/1617b0b1421f%2C44043%2C1733714833682.1733714892316 2024-12-09T03:28:12,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741833_1009 (size=317731) 2024-12-09T03:28:12,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741833_1009 (size=317731) 2024-12-09T03:28:12,353 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/WALs/1617b0b1421f,44043,1733714833682/1617b0b1421f%2C44043%2C1733714833682.1733714834726 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/oldWALs/1617b0b1421f%2C44043%2C1733714833682.1733714834726 2024-12-09T03:28:12,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35443:35443),(127.0.0.1/127.0.0.1:35319:35319)] 2024-12-09T03:28:12,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:28:12,358 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:28:12,358 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:12,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:12,358 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
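The "Call stack:" DEBUG entries above are emitted when AsyncConnectionImpl.close() records which caller closed the connection. A minimal sketch of that pattern using Thread.getStackTrace() follows; the CloseStackLogger class is hypothetical and not the HBase implementation.

// Illustrative only: capture and print the caller's stack at close time,
// the same way the log above shows who triggered the connection close.
public class CloseStackLogger {
    public void close() {
        StringBuilder sb = new StringBuilder("Call stack:");
        // Note: the first frames are getStackTrace() and close() themselves.
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
            sb.append("\n  at ").append(frame);
        }
        System.out.println(sb); // a real component would use LOG.debug(sb)
    }

    public static void main(String[] args) {
        new CloseStackLogger().close();
    }
}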
2024-12-09T03:28:12,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:12,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:28:12,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1664057984, stopped=false 2024-12-09T03:28:12,358 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,38129,1733714833477 2024-12-09T03:28:12,546 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:12,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:12,546 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:12,546 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:28:12,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:12,546 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:28:12,546 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:12,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:12,546 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,44043,1733714833682' ***** 2024-12-09T03:28:12,546 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:28:12,547 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:12,547 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:28:12,547 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(3091): Received CLOSE for 784580dedcd7bf1485608e5fe5f1f043 2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(3091): Received CLOSE for e97476aa668f8fba455fa1666a01664d 2024-12-09T03:28:12,547 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,44043,1733714833682 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:44043. 2024-12-09T03:28:12,548 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 784580dedcd7bf1485608e5fe5f1f043, disabling compactions & flushes 2024-12-09T03:28:12,548 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:12,548 DEBUG [RS:0;1617b0b1421f:44043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:12,548 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:12,548 DEBUG [RS:0;1617b0b1421f:44043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:12,548 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. after waiting 0 ms 2024-12-09T03:28:12,548 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 
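The close sequence above ("Closing ..., disabling compactions & flushes", "Time limited wait for close lock", "Acquired close lock ... after waiting 0 ms", "Updates disabled") reflects a region acquiring an exclusive lock before shutting out writers. A rough sketch of a timed close-lock acquisition with ReentrantReadWriteLock is given below; the names are illustrative and this is not HRegion's actual locking code.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Rough sketch: writers hold the read lock while applying an update; close()
// takes the write lock with a timeout, waiting for in-flight updates to drain
// and then blocking new ones ("Updates disabled").
public class ClosableRegionSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean closed = false;

    public void update() {
        closeLock.readLock().lock();
        try {
            if (closed) {
                throw new IllegalStateException("region is closing");
            }
            // apply the edit ...
        } finally {
            closeLock.readLock().unlock();
        }
    }

    public boolean close(long timeoutMs) throws InterruptedException {
        if (!closeLock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
            return false; // could not acquire the close lock in time
        }
        try {
            closed = true; // updates disabled from here on
            // flush memstore, close store files ...
            return true;
        } finally {
            closeLock.writeLock().unlock();
        }
    }
}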
2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:28:12,548 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-09T03:28:12,548 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1325): Online Regions={784580dedcd7bf1485608e5fe5f1f043=TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043., 1588230740=hbase:meta,,1.1588230740, e97476aa668f8fba455fa1666a01664d=TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.} 2024-12-09T03:28:12,548 DEBUG [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 784580dedcd7bf1485608e5fe5f1f043, e97476aa668f8fba455fa1666a01664d 2024-12-09T03:28:12,548 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:28:12,548 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:28:12,549 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:28:12,549 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:28:12,549 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:28:12,549 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-12-09T03:28:12,548 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-top, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/152ae95d694d45f4aa3d3c5d1cc54255, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/33bc327f958a43b08f7909cbcd2c96ec, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/5fed1de5128541ccb78b56228237682f, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/f0153a54bef54a0ebf84240ee486e4c6, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2afa38ee869c4366af8aa354fe1543b1, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1e6a334245b54730bd7e539119946e35, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/787c9a11493e4dc9bef1259c0525e697, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9c4a029167ca4d779b2c0e2c484afa8b, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0] to archive 2024-12-09T03:28:12,550 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:28:12,551 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:28:12,553 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/152ae95d694d45f4aa3d3c5d1cc54255 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/152ae95d694d45f4aa3d3c5d1cc54255 2024-12-09T03:28:12,554 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/e9b64981603849ebaf245755a232d8bf is 193, key is TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043./info:regioninfo/1733714868576/Put/seqid=0 2024-12-09T03:28:12,554 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/TestLogRolling-testLogRolling=d4e2ff6e64d18ee11bc1992b0fcdca97-be68c05c7c7248c69d89116c8616a615 2024-12-09T03:28:12,555 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/133f3b18373d426f99ceb69ba719b2bc 2024-12-09T03:28:12,557 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/33bc327f958a43b08f7909cbcd2c96ec to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/33bc327f958a43b08f7909cbcd2c96ec 2024-12-09T03:28:12,558 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8dbe1819dca648568bb7fe8260ac7c98 2024-12-09T03:28:12,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741880_1056 (size=6223) 2024-12-09T03:28:12,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741880_1056 (size=6223) 2024-12-09T03:28:12,559 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/e9b64981603849ebaf245755a232d8bf 2024-12-09T03:28:12,559 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae to 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1f38fba6680942b496096b78387979ae 2024-12-09T03:28:12,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/5fed1de5128541ccb78b56228237682f to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/5fed1de5128541ccb78b56228237682f 2024-12-09T03:28:12,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/c41d601412d44f61b64e599aba10c768 2024-12-09T03:28:12,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/7e41bf462d09486cbb1c86d9b58bf809 2024-12-09T03:28:12,565 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/.tmp/info/e9b64981603849ebaf245755a232d8bf as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/info/e9b64981603849ebaf245755a232d8bf 2024-12-09T03:28:12,566 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/f0153a54bef54a0ebf84240ee486e4c6 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/f0153a54bef54a0ebf84240ee486e4c6 2024-12-09T03:28:12,568 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568 to 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2847c181e92a4efaa00c44b75e73d568 2024-12-09T03:28:12,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a6cbf4e56f84493aa2d7b78daab9e5a9 2024-12-09T03:28:12,571 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2afa38ee869c4366af8aa354fe1543b1 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2afa38ee869c4366af8aa354fe1543b1 2024-12-09T03:28:12,571 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/info/e9b64981603849ebaf245755a232d8bf, entries=5, sequenceid=21, filesize=6.1 K 2024-12-09T03:28:12,572 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-12-09T03:28:12,572 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/ec6c0fb96be24681ad5c187efb9c3a2d 2024-12-09T03:28:12,573 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3254a7700dad4952a513bb7a3d082af2 2024-12-09T03:28:12,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1e6a334245b54730bd7e539119946e35 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/1e6a334245b54730bd7e539119946e35 2024-12-09T03:28:12,576 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-09T03:28:12,576 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/e2750dd033554a2ba57ee0cf9e73d7b3 2024-12-09T03:28:12,577 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:28:12,577 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:28:12,577 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714892548Running coprocessor pre-close hooks at 1733714892548Disabling compacts and flushes for region at 1733714892548Disabling writes for close at 1733714892549 (+1 ms)Obtaining lock to block concurrent updates at 1733714892549Preparing flush snapshotting stores in 1588230740 at 1733714892549Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=705, getHeapSize=2040, getOffHeapSize=0, getCellsCount=5 at 1733714892549Flushing stores of hbase:meta,,1.1588230740 at 1733714892550 (+1 ms)Flushing 1588230740/info: creating writer at 1733714892550Flushing 1588230740/info: appending metadata at 1733714892553 (+3 ms)Flushing 1588230740/info: closing flushed file at 1733714892553Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43ca7020: reopening flushed file at 1733714892565 (+12 ms)Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false at 1733714892572 (+7 ms)Writing region close event to WAL at 1733714892573 (+1 ms)Running coprocessor post-close hooks at 1733714892577 (+4 ms)Closed at 1733714892577 2024-12-09T03:28:12,577 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:28:12,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/b6967f361ccf4e18a31e6b7bb88cd0fd 2024-12-09T03:28:12,579 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/787c9a11493e4dc9bef1259c0525e697 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/787c9a11493e4dc9bef1259c0525e697 2024-12-09T03:28:12,580 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/8eb04007c0b5443e944eaf8df42d9a91 2024-12-09T03:28:12,581 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/a5b040ee43df43c19d09f202a4104c23 2024-12-09T03:28:12,582 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9c4a029167ca4d779b2c0e2c484afa8b to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9c4a029167ca4d779b2c0e2c484afa8b 2024-12-09T03:28:12,583 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/9db973d7636245349bfdd2d172b4c0b4 2024-12-09T03:28:12,584 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/2fbd9cc776214529a7ed77e8c11965e5 2024-12-09T03:28:12,585 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/info/3594b66c708f4a059a3be0cdc37e36e0 2024-12-09T03:28:12,585 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1617b0b1421f:38129 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-09T03:28:12,586 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [152ae95d694d45f4aa3d3c5d1cc54255=8359, 133f3b18373d426f99ceb69ba719b2bc=12509, 33bc327f958a43b08f7909cbcd2c96ec=31106, 8dbe1819dca648568bb7fe8260ac7c98=20064, 1f38fba6680942b496096b78387979ae=17896, 5fed1de5128541ccb78b56228237682f=57110, c41d601412d44f61b64e599aba10c768=17906, 7e41bf462d09486cbb1c86d9b58bf809=24394, f0153a54bef54a0ebf84240ee486e4c6=84299, 2847c181e92a4efaa00c44b75e73d568=12516, a6cbf4e56f84493aa2d7b78daab9e5a9=24394, 2afa38ee869c4366af8aa354fe1543b1=113515, ec6c0fb96be24681ad5c187efb9c3a2d=14672, 3254a7700dad4952a513bb7a3d082af2=16828, 1e6a334245b54730bd7e539119946e35=136387, e2750dd033554a2ba57ee0cf9e73d7b3=15750, b6967f361ccf4e18a31e6b7bb88cd0fd=16828, 787c9a11493e4dc9bef1259c0525e697=160293, 8eb04007c0b5443e944eaf8df42d9a91=16839, a5b040ee43df43c19d09f202a4104c23=16839, 9c4a029167ca4d779b2c0e2c484afa8b=183058, 9db973d7636245349bfdd2d172b4c0b4=15760, 2fbd9cc776214529a7ed77e8c11965e5=16839, 3594b66c708f4a059a3be0cdc37e36e0=14681] 2024-12-09T03:28:12,589 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T03:28:12,589 INFO [regionserver/1617b0b1421f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T03:28:12,589 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/784580dedcd7bf1485608e5fe5f1f043/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=88 2024-12-09T03:28:12,590 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 784580dedcd7bf1485608e5fe5f1f043: Waiting for close lock at 1733714892548Running coprocessor pre-close hooks at 1733714892548Disabling compacts and flushes for region at 1733714892548Disabling writes for close at 1733714892548Writing region close event to WAL at 1733714892586 (+38 ms)Running coprocessor post-close hooks at 1733714892589 (+3 ms)Closed at 1733714892590 (+1 ms) 2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733714867866.784580dedcd7bf1485608e5fe5f1f043. 2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e97476aa668f8fba455fa1666a01664d, disabling compactions & flushes 2024-12-09T03:28:12,590 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 
2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. after waiting 0 ms 2024-12-09T03:28:12,590 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:28:12,590 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97->hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/d4e2ff6e64d18ee11bc1992b0fcdca97/info/35f784d9e6a74f55a6780bf06c220784-bottom] to archive 2024-12-09T03:28:12,591 INFO [regionserver/1617b0b1421f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:28:12,591 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T03:28:12,593 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97 to hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/archive/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/info/35f784d9e6a74f55a6780bf06c220784.d4e2ff6e64d18ee11bc1992b0fcdca97 2024-12-09T03:28:12,593 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-09T03:28:12,597 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/data/default/TestLogRolling-testLogRolling/e97476aa668f8fba455fa1666a01664d/recovered.edits/92.seqid, newMaxSeqId=92, maxSeqId=88 2024-12-09T03:28:12,597 INFO [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 
2024-12-09T03:28:12,597 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e97476aa668f8fba455fa1666a01664d: Waiting for close lock at 1733714892590Running coprocessor pre-close hooks at 1733714892590Disabling compacts and flushes for region at 1733714892590Disabling writes for close at 1733714892590Writing region close event to WAL at 1733714892593 (+3 ms)Running coprocessor post-close hooks at 1733714892597 (+4 ms)Closed at 1733714892597 2024-12-09T03:28:12,598 DEBUG [RS_CLOSE_REGION-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733714867866.e97476aa668f8fba455fa1666a01664d. 2024-12-09T03:28:12,748 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,44043,1733714833682; all regions closed. 2024-12-09T03:28:12,749 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,749 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,749 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,750 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,750 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741834_1010 (size=8107) 2024-12-09T03:28:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741834_1010 (size=8107) 2024-12-09T03:28:12,756 DEBUG [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/oldWALs 2024-12-09T03:28:12,756 INFO [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C44043%2C1733714833682.meta:.meta(num 1733714835172) 2024-12-09T03:28:12,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:12,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:12,758 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741879_1055 (size=778) 2024-12-09T03:28:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741879_1055 (size=778) 2024-12-09T03:28:12,762 DEBUG [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/oldWALs 2024-12-09T03:28:12,762 INFO [RS:0;1617b0b1421f:44043 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C44043%2C1733714833682:(num 1733714892316) 2024-12-09T03:28:12,762 DEBUG [RS:0;1617b0b1421f:44043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:12,762 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T03:28:12,762 INFO [RS:0;1617b0b1421f:44043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:28:12,762 INFO [RS:0;1617b0b1421f:44043 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:28:12,763 INFO [RS:0;1617b0b1421f:44043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:28:12,763 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:28:12,763 INFO [RS:0;1617b0b1421f:44043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44043 2024-12-09T03:28:12,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:28:12,766 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,44043,1733714833682 2024-12-09T03:28:12,766 INFO [RS:0;1617b0b1421f:44043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:28:12,777 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,44043,1733714833682] 2024-12-09T03:28:12,787 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,44043,1733714833682 already deleted, retry=false 2024-12-09T03:28:12,787 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,44043,1733714833682 expired; onlineServers=0 2024-12-09T03:28:12,788 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,38129,1733714833477' ***** 2024-12-09T03:28:12,788 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:28:12,788 DEBUG 
[M:0;1617b0b1421f:38129 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:28:12,788 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-09T03:28:12,788 DEBUG [M:0;1617b0b1421f:38129 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:28:12,788 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714834505 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714834505,5,FailOnTimeoutGroup] 2024-12-09T03:28:12,788 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714834505 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714834505,5,FailOnTimeoutGroup] 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:28:12,788 DEBUG [M:0;1617b0b1421f:38129 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:28:12,788 INFO [M:0;1617b0b1421f:38129 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:28:12,789 INFO [M:0;1617b0b1421f:38129 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:28:12,789 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T03:28:12,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:28:12,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:12,798 DEBUG [M:0;1617b0b1421f:38129 {}] zookeeper.ZKUtil(347): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:28:12,798 WARN [M:0;1617b0b1421f:38129 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:28:12,799 INFO [M:0;1617b0b1421f:38129 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/.lastflushedseqids 2024-12-09T03:28:12,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741881_1057 (size=228) 2024-12-09T03:28:12,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741881_1057 (size=228) 2024-12-09T03:28:12,805 INFO [M:0;1617b0b1421f:38129 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:28:12,805 INFO [M:0;1617b0b1421f:38129 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:28:12,806 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:28:12,806 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:12,806 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:12,806 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:28:12,806 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:28:12,806 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.23 KB heapSize=62.74 KB 2024-12-09T03:28:12,825 DEBUG [M:0;1617b0b1421f:38129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df3ec7da6183422da311022c3b9f7fa7 is 82, key is hbase:meta,,1/info:regioninfo/1733714835198/Put/seqid=0 2024-12-09T03:28:12,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741882_1058 (size=5672) 2024-12-09T03:28:12,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741882_1058 (size=5672) 2024-12-09T03:28:12,829 INFO [M:0;1617b0b1421f:38129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df3ec7da6183422da311022c3b9f7fa7 2024-12-09T03:28:12,846 DEBUG [M:0;1617b0b1421f:38129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/932b29614cf0498e8446c23e04c0adb0 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733714835822/Put/seqid=0 2024-12-09T03:28:12,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741883_1059 (size=7088) 2024-12-09T03:28:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741883_1059 (size=7088) 2024-12-09T03:28:12,850 INFO [M:0;1617b0b1421f:38129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.63 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/932b29614cf0498e8446c23e04c0adb0 2024-12-09T03:28:12,854 INFO [M:0;1617b0b1421f:38129 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 932b29614cf0498e8446c23e04c0adb0 2024-12-09T03:28:12,866 DEBUG [M:0;1617b0b1421f:38129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d77bdda283f4709a2e5e5b8ae0e2da2 is 69, key is 1617b0b1421f,44043,1733714833682/rs:state/1733714834566/Put/seqid=0 2024-12-09T03:28:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741884_1060 (size=5156) 2024-12-09T03:28:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741884_1060 (size=5156) 2024-12-09T03:28:12,871 INFO [M:0;1617b0b1421f:38129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=119 (bloomFilter=true), 
to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d77bdda283f4709a2e5e5b8ae0e2da2 2024-12-09T03:28:12,877 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:12,877 DEBUG [pool-823-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44043-0x100089cf6ff0001, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:12,877 INFO [RS:0;1617b0b1421f:44043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:28:12,877 INFO [RS:0;1617b0b1421f:44043 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,44043,1733714833682; zookeeper connection closed. 2024-12-09T03:28:12,878 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@49a8a9ce {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@49a8a9ce 2024-12-09T03:28:12,878 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:28:12,891 DEBUG [M:0;1617b0b1421f:38129 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab58c39d6a584e0983a56ec99cb71358 is 52, key is load_balancer_on/state:d/1733714835445/Put/seqid=0 2024-12-09T03:28:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741885_1061 (size=5056) 2024-12-09T03:28:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741885_1061 (size=5056) 2024-12-09T03:28:12,896 INFO [M:0;1617b0b1421f:38129 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab58c39d6a584e0983a56ec99cb71358 2024-12-09T03:28:12,901 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/df3ec7da6183422da311022c3b9f7fa7 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/df3ec7da6183422da311022c3b9f7fa7 2024-12-09T03:28:12,905 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/df3ec7da6183422da311022c3b9f7fa7, entries=8, sequenceid=119, filesize=5.5 K 2024-12-09T03:28:12,906 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/932b29614cf0498e8446c23e04c0adb0 as 
hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/932b29614cf0498e8446c23e04c0adb0 2024-12-09T03:28:12,911 INFO [M:0;1617b0b1421f:38129 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 932b29614cf0498e8446c23e04c0adb0 2024-12-09T03:28:12,911 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/932b29614cf0498e8446c23e04c0adb0, entries=13, sequenceid=119, filesize=6.9 K 2024-12-09T03:28:12,912 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0d77bdda283f4709a2e5e5b8ae0e2da2 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d77bdda283f4709a2e5e5b8ae0e2da2 2024-12-09T03:28:12,916 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0d77bdda283f4709a2e5e5b8ae0e2da2, entries=1, sequenceid=119, filesize=5.0 K 2024-12-09T03:28:12,917 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ab58c39d6a584e0983a56ec99cb71358 as hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab58c39d6a584e0983a56ec99cb71358 2024-12-09T03:28:12,923 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34819/user/jenkins/test-data/df8caee3-ec2f-977b-1430-e95202e4f115/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ab58c39d6a584e0983a56ec99cb71358, entries=1, sequenceid=119, filesize=4.9 K 2024-12-09T03:28:12,924 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.23 KB/52459, heapSize ~62.68 KB/64184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=119, compaction requested=false 2024-12-09T03:28:12,925 INFO [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:12,925 DEBUG [M:0;1617b0b1421f:38129 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714892805Disabling compacts and flushes for region at 1733714892805Disabling writes for close at 1733714892806 (+1 ms)Obtaining lock to block concurrent updates at 1733714892806Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714892806Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52459, getHeapSize=64184, getOffHeapSize=0, getCellsCount=142 at 1733714892806Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733714892807 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714892807Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714892824 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714892824Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714892833 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714892845 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714892845Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714892854 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714892866 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714892866Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714892876 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714892890 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714892891 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7825ebc0: reopening flushed file at 1733714892900 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@311238b9: reopening flushed file at 1733714892905 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27be6bfb: reopening flushed file at 1733714892911 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4449e04c: reopening flushed file at 1733714892916 (+5 ms)Finished flush of dataSize ~51.23 KB/52459, heapSize ~62.68 KB/64184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=119, compaction requested=false at 1733714892924 (+8 ms)Writing region close event to WAL at 1733714892925 (+1 ms)Closed at 1733714892925 2024-12-09T03:28:12,926 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,926 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,926 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,926 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,926 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:12,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33847 is added to blk_1073741830_1006 (size=60744) 2024-12-09T03:28:12,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45573 is added to blk_1073741830_1006 (size=60744) 2024-12-09T03:28:12,929 INFO [M:0;1617b0b1421f:38129 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T03:28:12,929 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T03:28:12,929 INFO [M:0;1617b0b1421f:38129 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38129 2024-12-09T03:28:12,929 INFO [M:0;1617b0b1421f:38129 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:28:13,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:13,035 INFO [M:0;1617b0b1421f:38129 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:28:13,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38129-0x100089cf6ff0000, quorum=127.0.0.1:61967, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:13,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e87fea8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:28:13,065 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@604cd81b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:28:13,065 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:28:13,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63b7be59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:28:13,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dce6fab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,STOPPED} 2024-12-09T03:28:13,067 WARN [BP-1650772398-172.17.0.3-1733714831059 heartbeating to localhost/127.0.0.1:34819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:28:13,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:28:13,067 WARN [BP-1650772398-172.17.0.3-1733714831059 heartbeating to localhost/127.0.0.1:34819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1650772398-172.17.0.3-1733714831059 (Datanode Uuid 9a026953-f60e-43b6-8ee1-5585f52ca9da) service to localhost/127.0.0.1:34819 2024-12-09T03:28:13,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:28:13,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data3/current/BP-1650772398-172.17.0.3-1733714831059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:28:13,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data4/current/BP-1650772398-172.17.0.3-1733714831059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:28:13,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:28:13,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1102e2d6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:28:13,070 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cb72b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:28:13,070 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:28:13,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@256a17a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:28:13,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5284eb61{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,STOPPED} 2024-12-09T03:28:13,071 WARN [BP-1650772398-172.17.0.3-1733714831059 heartbeating to localhost/127.0.0.1:34819 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T03:28:13,071 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T03:28:13,071 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T03:28:13,071 WARN [BP-1650772398-172.17.0.3-1733714831059 heartbeating to localhost/127.0.0.1:34819 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1650772398-172.17.0.3-1733714831059 (Datanode Uuid daf18ddc-37d6-458f-bb9e-e23f6bbaa620) service to localhost/127.0.0.1:34819 2024-12-09T03:28:13,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data1/current/BP-1650772398-172.17.0.3-1733714831059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:28:13,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/cluster_f9ae14a1-f102-7d81-40fc-d07476c67dda/data/data2/current/BP-1650772398-172.17.0.3-1733714831059 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T03:28:13,072 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T03:28:13,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fbe59a1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:28:13,079 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5712bc9d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T03:28:13,079 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T03:28:13,079 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d4e3343{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T03:28:13,079 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43d8bd5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir/,STOPPED} 2024-12-09T03:28:13,086 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T03:28:13,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T03:28:13,129 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 206) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:34819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:34819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:34819 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:34819 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34819 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34819 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=523 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=202 (was 162) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4942 (was 5194) 2024-12-09T03:28:13,138 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=523, MaxFileDescriptor=1048576, SystemLoadAverage=202, ProcessCount=11, AvailableMemoryMB=4942 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.log.dir so I do NOT create it in target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/730d711b-3653-e8d2-2fbb-baecfe746879/hadoop.tmp.dir so I do NOT create it in target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4, deleteOnExit=true 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/test.cache.data in system properties and HBase conf 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir in system properties and HBase conf 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T03:28:13,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T03:28:13,139 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/nfs.dump.dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/java.io.tmpdir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T03:28:13,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T03:28:13,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T03:28:13,153 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:28:13,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:28:13,512 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:28:13,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:28:13,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:28:13,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T03:28:13,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:28:13,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27d58507{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:28:13,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab0454{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:28:13,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d0e457e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/java.io.tmpdir/jetty-localhost-35731-hadoop-hdfs-3_4_1-tests_jar-_-any-17012227283845185886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T03:28:13,629 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b726df5{HTTP/1.1, (http/1.1)}{localhost:35731} 2024-12-09T03:28:13,629 INFO [Time-limited test {}] server.Server(415): Started @313879ms 2024-12-09T03:28:13,642 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T03:28:13,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:13,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:13,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:28:13,893 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:28:13,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:28:13,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:28:13,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:28:13,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d8843b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:28:13,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bc56624{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:28:14,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d7b0249{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/java.io.tmpdir/jetty-localhost-37313-hadoop-hdfs-3_4_1-tests_jar-_-any-6720776180658160724/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:28:14,011 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71d7fc74{HTTP/1.1, (http/1.1)}{localhost:37313} 2024-12-09T03:28:14,011 INFO [Time-limited test {}] server.Server(415): Started @314261ms 2024-12-09T03:28:14,012 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:28:14,040 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T03:28:14,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T03:28:14,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T03:28:14,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T03:28:14,044 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T03:28:14,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3353f5c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,AVAILABLE} 2024-12-09T03:28:14,044 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b6a009{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T03:28:14,144 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67700d38{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/java.io.tmpdir/jetty-localhost-44503-hadoop-hdfs-3_4_1-tests_jar-_-any-16296265290401045154/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T03:28:14,145 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@714fc7b6{HTTP/1.1, (http/1.1)}{localhost:44503} 2024-12-09T03:28:14,145 INFO [Time-limited test {}] server.Server(415): Started @314395ms 2024-12-09T03:28:14,146 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T03:28:14,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:14,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:14,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:14,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,158 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data2/current/BP-1533848234-172.17.0.3-1733714893156/current, will proceed with Du for space computation calculation, 2024-12-09T03:28:15,158 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data1/current/BP-1533848234-172.17.0.3-1733714893156/current, will proceed with Du for space computation calculation, 2024-12-09T03:28:15,175 WARN [Thread-2465 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:28:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f2c31578fea37a with lease ID 0x3e7fbf92422093f: Processing first storage report for DS-ea4e51ad-1cfe-4741-aef3-bc946b0a3283 from datanode DatanodeRegistration(127.0.0.1:36305, datanodeUuid=802bea98-2a52-4677-b07e-941d6c442098, infoPort=45495, infoSecurePort=0, ipcPort=43275, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156) 2024-12-09T03:28:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f2c31578fea37a with lease ID 0x3e7fbf92422093f: from storage DS-ea4e51ad-1cfe-4741-aef3-bc946b0a3283 node DatanodeRegistration(127.0.0.1:36305, datanodeUuid=802bea98-2a52-4677-b07e-941d6c442098, infoPort=45495, infoSecurePort=0, ipcPort=43275, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:28:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x36f2c31578fea37a with lease ID 0x3e7fbf92422093f: Processing first storage report for DS-4cc0bb6d-c844-4f68-ad01-4b2e21203f5a from datanode DatanodeRegistration(127.0.0.1:36305, datanodeUuid=802bea98-2a52-4677-b07e-941d6c442098, infoPort=45495, infoSecurePort=0, ipcPort=43275, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156) 2024-12-09T03:28:15,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x36f2c31578fea37a with lease ID 0x3e7fbf92422093f: from storage DS-4cc0bb6d-c844-4f68-ad01-4b2e21203f5a node DatanodeRegistration(127.0.0.1:36305, datanodeUuid=802bea98-2a52-4677-b07e-941d6c442098, infoPort=45495, infoSecurePort=0, ipcPort=43275, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:28:15,296 WARN [Thread-2512 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data3/current/BP-1533848234-172.17.0.3-1733714893156/current, will proceed with Du for space computation calculation, 2024-12-09T03:28:15,296 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data4/current/BP-1533848234-172.17.0.3-1733714893156/current, will proceed with Du for space computation calculation, 2024-12-09T03:28:15,317 WARN [Thread-2488 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T03:28:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8a8799e8274a5e7 with lease ID 0x3e7fbf924220940: Processing first storage report for DS-4856cf6e-ab38-4523-aeff-a9fd9c334891 from datanode DatanodeRegistration(127.0.0.1:35573, datanodeUuid=b4a0c22b-5aeb-4ff9-a81c-81a4dd6f49ec, infoPort=41765, infoSecurePort=0, ipcPort=38797, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156) 2024-12-09T03:28:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8a8799e8274a5e7 with lease ID 0x3e7fbf924220940: from storage DS-4856cf6e-ab38-4523-aeff-a9fd9c334891 node DatanodeRegistration(127.0.0.1:35573, datanodeUuid=b4a0c22b-5aeb-4ff9-a81c-81a4dd6f49ec, infoPort=41765, infoSecurePort=0, ipcPort=38797, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:28:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8a8799e8274a5e7 with lease ID 0x3e7fbf924220940: Processing first storage report for DS-1abc4bf5-3375-48c2-a723-0412055057b6 from datanode DatanodeRegistration(127.0.0.1:35573, datanodeUuid=b4a0c22b-5aeb-4ff9-a81c-81a4dd6f49ec, infoPort=41765, infoSecurePort=0, ipcPort=38797, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156) 2024-12-09T03:28:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8a8799e8274a5e7 with lease ID 0x3e7fbf924220940: from storage DS-1abc4bf5-3375-48c2-a723-0412055057b6 node DatanodeRegistration(127.0.0.1:35573, datanodeUuid=b4a0c22b-5aeb-4ff9-a81c-81a4dd6f49ec, infoPort=41765, infoSecurePort=0, ipcPort=38797, storageInfo=lv=-57;cid=testClusterID;nsid=538826914;c=1733714893156), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T03:28:15,361 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T03:28:15,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,364 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,373 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba 2024-12-09T03:28:15,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,386 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/zookeeper_0, clientPort=59403, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T03:28:15,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,387 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59403 2024-12-09T03:28:15,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T03:28:15,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:28:15,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741825_1001 (size=7) 2024-12-09T03:28:15,407 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769 with version=8 2024-12-09T03:28:15,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:40037/user/jenkins/test-data/a07641ad-6d21-89f9-0471-9e9e26c99a66/hbase-staging 2024-12-09T03:28:15,409 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T03:28:15,410 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:28:15,411 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41295 2024-12-09T03:28:15,412 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41295 connecting to ZooKeeper ensemble=127.0.0.1:59403 2024-12-09T03:28:15,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412950x0, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:28:15,475 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41295-0x100089de8d50000 connected 2024-12-09T03:28:15,556 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,561 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:15,562 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769, hbase.cluster.distributed=false 2024-12-09T03:28:15,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:28:15,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41295 2024-12-09T03:28:15,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41295 2024-12-09T03:28:15,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41295 2024-12-09T03:28:15,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41295 2024-12-09T03:28:15,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41295 2024-12-09T03:28:15,581 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1617b0b1421f:0 server-side Connection retries=45 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T03:28:15,581 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T03:28:15,582 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46829 2024-12-09T03:28:15,582 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46829 connecting to ZooKeeper ensemble=127.0.0.1:59403 2024-12-09T03:28:15,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can 
do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468290x0, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T03:28:15,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468290x0, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:15,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46829-0x100089de8d50001 connected 2024-12-09T03:28:15,599 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T03:28:15,599 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T03:28:15,600 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T03:28:15,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T03:28:15,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46829 2024-12-09T03:28:15,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46829 2024-12-09T03:28:15,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46829 2024-12-09T03:28:15,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46829 2024-12-09T03:28:15,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46829 2024-12-09T03:28:15,613 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1617b0b1421f:41295 2024-12-09T03:28:15,613 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:28:15,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:28:15,619 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1617b0b1421f,41295,1733714895409 
2024-12-09T03:28:15,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T03:28:15,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,630 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T03:28:15,631 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1617b0b1421f,41295,1733714895409 from backup master directory 2024-12-09T03:28:15,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:28:15,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T03:28:15,640 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T03:28:15,640 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,645 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/hbase.id] with ID: a4500d42-0515-43dc-b2ed-2ccc01700c24 2024-12-09T03:28:15,645 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/.tmp/hbase.id 2024-12-09T03:28:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:28:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741826_1002 (size=42) 2024-12-09T03:28:15,660 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/.tmp/hbase.id]:[hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/hbase.id] 2024-12-09T03:28:15,671 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:15,672 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T03:28:15,673 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T03:28:15,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:28:15,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741827_1003 (size=196) 2024-12-09T03:28:15,688 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T03:28:15,689 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T03:28:15,689 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:28:15,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:28:15,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741828_1004 (size=1189) 2024-12-09T03:28:15,703 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store 2024-12-09T03:28:15,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:28:15,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741829_1005 (size=34) 2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:28:15,710 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:15,710 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:28:15,710 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714895710Disabling compacts and flushes for region at 1733714895710Disabling writes for close at 1733714895710Writing region close event to WAL at 1733714895710Closed at 1733714895710 2024-12-09T03:28:15,711 WARN [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/.initializing 2024-12-09T03:28:15,711 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/WALs/1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,714 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C41295%2C1733714895409, suffix=, logDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/WALs/1617b0b1421f,41295,1733714895409, archiveDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/oldWALs, maxLogs=10 2024-12-09T03:28:15,714 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C41295%2C1733714895409.1733714895714 2024-12-09T03:28:15,722 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/WALs/1617b0b1421f,41295,1733714895409/1617b0b1421f%2C41295%2C1733714895409.1733714895714 2024-12-09T03:28:15,722 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45495:45495),(127.0.0.1/127.0.0.1:41765:41765)] 2024-12-09T03:28:15,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:28:15,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:28:15,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,723 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T03:28:15,726 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:15,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T03:28:15,728 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:28:15,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T03:28:15,730 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:28:15,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T03:28:15,732 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,732 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T03:28:15,732 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,733 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,733 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,734 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,734 DEBUG [master/1617b0b1421f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,735 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T03:28:15,736 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T03:28:15,745 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:28:15,746 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759667, jitterRate=-0.03403441607952118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T03:28:15,746 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733714895723Initializing all the Stores at 1733714895724 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714895724Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714895724Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714895724Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714895724Cleaning up temporary data from old regions at 1733714895734 (+10 ms)Region opened successfully at 1733714895746 (+12 ms) 2024-12-09T03:28:15,747 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T03:28:15,753 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21524efe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:28:15,754 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T03:28:15,755 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T03:28:15,755 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T03:28:15,755 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T03:28:15,755 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T03:28:15,756 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T03:28:15,756 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T03:28:15,758 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T03:28:15,759 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T03:28:15,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:15,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:15,766 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T03:28:15,767 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T03:28:15,767 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T03:28:15,777 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T03:28:15,777 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T03:28:15,778 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T03:28:15,787 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T03:28:15,789 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T03:28:15,798 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T03:28:15,800 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T03:28:15,808 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T03:28:15,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:15,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:15,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,819 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1617b0b1421f,41295,1733714895409, sessionid=0x100089de8d50000, setting cluster-up flag (Was=false) 2024-12-09T03:28:15,840 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,872 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T03:28:15,873 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:15,924 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T03:28:15,926 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1617b0b1421f,41295,1733714895409 2024-12-09T03:28:15,928 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T03:28:15,930 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T03:28:15,930 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T03:28:15,931 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T03:28:15,931 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1617b0b1421f,41295,1733714895409 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T03:28:15,932 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:28:15,932 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:28:15,932 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:28:15,932 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1617b0b1421f:0, corePoolSize=5, maxPoolSize=5 2024-12-09T03:28:15,933 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1617b0b1421f:0, corePoolSize=10, maxPoolSize=10 2024-12-09T03:28:15,933 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:15,933 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:28:15,933 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733714925934 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T03:28:15,934 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:15,935 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:28:15,935 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T03:28:15,935 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T03:28:15,936 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714895936,5,FailOnTimeoutGroup] 2024-12-09T03:28:15,936 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,936 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714895936,5,FailOnTimeoutGroup] 2024-12-09T03:28:15,936 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:15,936 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T03:28:15,936 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T03:28:15,936 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:15,936 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:15,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:28:15,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741831_1007 (size=1321) 2024-12-09T03:28:15,942 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T03:28:15,942 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769 2024-12-09T03:28:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:28:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741832_1008 (size=32) 2024-12-09T03:28:15,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:28:15,949 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:28:15,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:28:15,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:15,951 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:28:15,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:28:15,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:15,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:28:15,954 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:28:15,954 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:15,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:28:15,955 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:28:15,955 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:15,956 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:15,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:28:15,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740 2024-12-09T03:28:15,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740 2024-12-09T03:28:15,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:28:15,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:28:15,958 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T03:28:15,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:28:15,961 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T03:28:15,962 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815347, jitterRate=0.036767423152923584}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:28:15,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733714895948Initializing all the Stores at 1733714895949 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714895949Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714895949Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714895949Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714895949Cleaning up temporary data from old regions at 1733714895958 (+9 ms)Region opened successfully at 1733714895962 (+4 ms) 2024-12-09T03:28:15,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:28:15,963 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:28:15,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:28:15,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:28:15,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:28:15,963 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:28:15,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714895963Disabling compacts and flushes for region at 1733714895963Disabling writes for close at 1733714895963Writing region close 
event to WAL at 1733714895963Closed at 1733714895963 2024-12-09T03:28:15,964 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:28:15,964 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T03:28:15,965 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T03:28:15,966 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:28:15,967 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T03:28:16,004 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(746): ClusterId : a4500d42-0515-43dc-b2ed-2ccc01700c24 2024-12-09T03:28:16,004 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T03:28:16,009 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T03:28:16,009 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T03:28:16,020 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T03:28:16,020 DEBUG [RS:0;1617b0b1421f:46829 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65cdf6c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1617b0b1421f/172.17.0.3:0 2024-12-09T03:28:16,032 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1617b0b1421f:46829 2024-12-09T03:28:16,032 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T03:28:16,032 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T03:28:16,032 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T03:28:16,033 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(2659): reportForDuty to master=1617b0b1421f,41295,1733714895409 with port=46829, startcode=1733714895580 2024-12-09T03:28:16,033 DEBUG [RS:0;1617b0b1421f:46829 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T03:28:16,035 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57801, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T03:28:16,036 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41295 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,036 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41295 {}] master.ServerManager(517): Registering regionserver=1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,037 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769 2024-12-09T03:28:16,037 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33195 2024-12-09T03:28:16,037 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T03:28:16,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:28:16,040 DEBUG [RS:0;1617b0b1421f:46829 {}] zookeeper.ZKUtil(111): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,040 WARN [RS:0;1617b0b1421f:46829 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T03:28:16,040 INFO [RS:0;1617b0b1421f:46829 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:28:16,040 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,041 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1617b0b1421f,46829,1733714895580] 2024-12-09T03:28:16,043 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T03:28:16,044 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T03:28:16,045 INFO [RS:0;1617b0b1421f:46829 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T03:28:16,045 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:16,045 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T03:28:16,046 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T03:28:16,046 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1617b0b1421f:0, corePoolSize=2, maxPoolSize=2 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1617b0b1421f:0, corePoolSize=1, maxPoolSize=1 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:28:16,046 DEBUG [RS:0;1617b0b1421f:46829 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1617b0b1421f:0, corePoolSize=3, maxPoolSize=3 2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,047 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,46829,1733714895580-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:28:16,062 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T03:28:16,062 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,46829,1733714895580-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,062 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,062 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.Replication(171): 1617b0b1421f,46829,1733714895580 started 2024-12-09T03:28:16,075 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,075 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1482): Serving as 1617b0b1421f,46829,1733714895580, RpcServer on 1617b0b1421f/172.17.0.3:46829, sessionid=0x100089de8d50001 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,46829,1733714895580' 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1617b0b1421f,46829,1733714895580' 2024-12-09T03:28:16,076 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T03:28:16,077 DEBUG 
[RS:0;1617b0b1421f:46829 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T03:28:16,077 DEBUG [RS:0;1617b0b1421f:46829 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T03:28:16,077 INFO [RS:0;1617b0b1421f:46829 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T03:28:16,077 INFO [RS:0;1617b0b1421f:46829 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T03:28:16,117 WARN [1617b0b1421f:41295 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T03:28:16,179 INFO [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C46829%2C1733714895580, suffix=, logDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/1617b0b1421f,46829,1733714895580, archiveDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs, maxLogs=32 2024-12-09T03:28:16,179 INFO [RS:0;1617b0b1421f:46829 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C46829%2C1733714895580.1733714896179 2024-12-09T03:28:16,188 INFO [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/1617b0b1421f,46829,1733714895580/1617b0b1421f%2C46829%2C1733714895580.1733714896179 2024-12-09T03:28:16,192 DEBUG [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41765:41765),(127.0.0.1/127.0.0.1:45495:45495)] 2024-12-09T03:28:16,367 DEBUG [1617b0b1421f:41295 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T03:28:16,368 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,369 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,46829,1733714895580, state=OPENING 2024-12-09T03:28:16,377 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T03:28:16,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:16,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:16,388 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T03:28:16,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:28:16,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:28:16,388 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,46829,1733714895580}] 2024-12-09T03:28:16,541 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T03:28:16,543 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56147, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T03:28:16,546 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T03:28:16,546 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:28:16,548 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1617b0b1421f%2C46829%2C1733714895580.meta, suffix=.meta, logDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/1617b0b1421f,46829,1733714895580, archiveDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs, maxLogs=32 2024-12-09T03:28:16,548 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 1617b0b1421f%2C46829%2C1733714895580.meta.1733714896548.meta 2024-12-09T03:28:16,558 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/1617b0b1421f,46829,1733714895580/1617b0b1421f%2C46829%2C1733714895580.meta.1733714896548.meta 2024-12-09T03:28:16,569 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45495:45495),(127.0.0.1/127.0.0.1:41765:41765)] 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T03:28:16,570 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T03:28:16,570 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T03:28:16,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T03:28:16,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T03:28:16,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:16,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:16,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T03:28:16,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T03:28:16,574 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:16,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:16,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T03:28:16,575 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T03:28:16,575 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:16,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T03:28:16,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T03:28:16,576 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T03:28:16,576 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T03:28:16,576 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T03:28:16,576 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T03:28:16,577 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740 2024-12-09T03:28:16,578 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740 2024-12-09T03:28:16,579 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T03:28:16,579 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T03:28:16,579 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T03:28:16,580 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T03:28:16,581 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771164, jitterRate=-0.019414767622947693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T03:28:16,581 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T03:28:16,581 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733714896571Writing region info on filesystem at 1733714896571Initializing all the Stores at 1733714896571Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714896571Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714896571Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733714896571Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733714896571Cleaning up temporary data from old regions at 1733714896579 (+8 ms)Running coprocessor post-open hooks at 1733714896581 (+2 ms)Region opened successfully at 1733714896581 2024-12-09T03:28:16,582 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733714896540 2024-12-09T03:28:16,584 DEBUG [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T03:28:16,585 INFO [RS_OPEN_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T03:28:16,585 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,586 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1617b0b1421f,46829,1733714895580, state=OPEN 2024-12-09T03:28:16,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:28:16,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T03:28:16,628 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,628 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:28:16,628 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T03:28:16,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T03:28:16,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1617b0b1421f,46829,1733714895580 in 240 msec 2024-12-09T03:28:16,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T03:28:16,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 666 msec 2024-12-09T03:28:16,634 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T03:28:16,634 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T03:28:16,635 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:28:16,635 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,46829,1733714895580, seqNum=-1] 2024-12-09T03:28:16,636 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:28:16,637 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33185, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:28:16,642 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 712 msec 2024-12-09T03:28:16,642 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733714896642, completionTime=-1 2024-12-09T03:28:16,643 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T03:28:16,643 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733714956645 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733715016645 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1617b0b1421f:41295, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:16,645 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,646 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T03:28:16,647 DEBUG [master/1617b0b1421f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.009sec 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T03:28:16,649 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T03:28:16,651 DEBUG [master/1617b0b1421f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T03:28:16,651 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T03:28:16,651 INFO [master/1617b0b1421f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1617b0b1421f,41295,1733714895409-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T03:28:16,704 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@232d1000, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:28:16,705 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1617b0b1421f,41295,-1 for getting cluster id 2024-12-09T03:28:16,705 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T03:28:16,707 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a4500d42-0515-43dc-b2ed-2ccc01700c24' 2024-12-09T03:28:16,707 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T03:28:16,707 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a4500d42-0515-43dc-b2ed-2ccc01700c24" 2024-12-09T03:28:16,708 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a36ed7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:28:16,708 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1617b0b1421f,41295,-1] 2024-12-09T03:28:16,708 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T03:28:16,708 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,710 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37580, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T03:28:16,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cb72af6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T03:28:16,711 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T03:28:16,712 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1617b0b1421f,46829,1733714895580, seqNum=-1] 2024-12-09T03:28:16,713 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T03:28:16,714 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51522, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T03:28:16,716 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1617b0b1421f,41295,1733714895409 2024-12-09T03:28:16,716 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T03:28:16,719 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T03:28:16,719 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T03:28:16,721 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1, archiveDir=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs, maxLogs=32 2024-12-09T03:28:16,722 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733714896722 2024-12-09T03:28:16,727 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1/test.com%2C8080%2C1.1733714896722 2024-12-09T03:28:16,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41765:41765),(127.0.0.1/127.0.0.1:45495:45495)] 2024-12-09T03:28:16,729 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733714896729 2024-12-09T03:28:16,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,734 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,734 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1/test.com%2C8080%2C1.1733714896722 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1/test.com%2C8080%2C1.1733714896729 2024-12-09T03:28:16,735 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45495:45495),(127.0.0.1/127.0.0.1:41765:41765)] 2024-12-09T03:28:16,735 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1/test.com%2C8080%2C1.1733714896722 is not closed yet, will try archiving it next time 2024-12-09T03:28:16,736 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741835_1011 (size=93) 2024-12-09T03:28:16,736 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,737 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741835_1011 (size=93) 2024-12-09T03:28:16,737 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,737 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/WALs/test.com,8080,1/test.com%2C8080%2C1.1733714896722 to hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs/test.com%2C8080%2C1.1733714896722 2024-12-09T03:28:16,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741836_1012 (size=93) 2024-12-09T03:28:16,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741836_1012 (size=93) 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs 2024-12-09T03:28:16,743 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733714896729) 2024-12-09T03:28:16,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T03:28:16,743 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,743 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T03:28:16,743 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1456064954, stopped=false 2024-12-09T03:28:16,743 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1617b0b1421f,41295,1733714895409 2024-12-09T03:28:16,745 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T03:28:16,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:16,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T03:28:16,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:16,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:16,756 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:28:16,756 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T03:28:16,756 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:16,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,756 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:16,756 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1617b0b1421f,46829,1733714895580' ***** 2024-12-09T03:28:16,756 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T03:28:16,757 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(959): stopping server 1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:28:16,757 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1617b0b1421f:46829. 2024-12-09T03:28:16,757 DEBUG [RS:0;1617b0b1421f:46829 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T03:28:16,757 DEBUG [RS:0;1617b0b1421f:46829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T03:28:16,757 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T03:28:16,758 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T03:28:16,758 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T03:28:16,758 DEBUG [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T03:28:16,758 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T03:28:16,758 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T03:28:16,758 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T03:28:16,758 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T03:28:16,758 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T03:28:16,758 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T03:28:16,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,33301,1733714694014/1617b0b1421f%2C33301%2C1733714694014.1733714694252 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T03:28:16,760 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33407/user/jenkins/test-data/a47e6819-27fc-0b37-3866-1eabdef603ce/WALs/1617b0b1421f,36295,1733714692747/1617b0b1421f%2C36295%2C1733714692747.meta.1733714693819.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T03:28:16,776 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/.tmp/ns/c197123cb235453ca4503c45795fc62d is 43, key is default/ns:d/1733714896638/Put/seqid=0 2024-12-09T03:28:16,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741837_1013 (size=5153) 2024-12-09T03:28:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741837_1013 (size=5153) 2024-12-09T03:28:16,781 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/.tmp/ns/c197123cb235453ca4503c45795fc62d 2024-12-09T03:28:16,786 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/.tmp/ns/c197123cb235453ca4503c45795fc62d as hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/ns/c197123cb235453ca4503c45795fc62d 2024-12-09T03:28:16,791 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/ns/c197123cb235453ca4503c45795fc62d, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T03:28:16,792 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-12-09T03:28:16,793 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T03:28:16,797 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T03:28:16,797 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T03:28:16,798 INFO [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T03:28:16,798 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733714896758Running coprocessor pre-close hooks at 1733714896758Disabling compacts and flushes for region at 1733714896758Disabling writes for close at 1733714896758Obtaining lock to block concurrent updates at 1733714896758Preparing flush snapshotting stores in 1588230740 at 1733714896758Finished memstore 
snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733714896758Flushing stores of hbase:meta,,1.1588230740 at 1733714896759 (+1 ms)Flushing 1588230740/ns: creating writer at 1733714896759Flushing 1588230740/ns: appending metadata at 1733714896776 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733714896776Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e1fc9bc: reopening flushed file at 1733714896786 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1733714896792 (+6 ms)Writing region close event to WAL at 1733714896794 (+2 ms)Running coprocessor post-close hooks at 1733714896797 (+3 ms)Closed at 1733714896797 2024-12-09T03:28:16,798 DEBUG [RS_CLOSE_META-regionserver/1617b0b1421f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T03:28:16,958 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(976): stopping server 1617b0b1421f,46829,1733714895580; all regions closed. 2024-12-09T03:28:16,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741834_1010 (size=1152) 2024-12-09T03:28:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741834_1010 (size=1152) 2024-12-09T03:28:16,965 DEBUG [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs 2024-12-09T03:28:16,965 INFO [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C46829%2C1733714895580.meta:.meta(num 1733714896548) 2024-12-09T03:28:16,965 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:16,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741833_1009 (size=93) 2024-12-09T03:28:16,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741833_1009 (size=93) 2024-12-09T03:28:16,971 DEBUG [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/oldWALs 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 1617b0b1421f%2C46829%2C1733714895580:(num 1733714896179) 2024-12-09T03:28:16,971 DEBUG [RS:0;1617b0b1421f:46829 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] 
regionserver.LeaseManager(133): Closed leases 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] hbase.ChoreService(370): Chore service for: regionserver/1617b0b1421f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:28:16,971 INFO [regionserver/1617b0b1421f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T03:28:16,971 INFO [RS:0;1617b0b1421f:46829 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46829 2024-12-09T03:28:16,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T03:28:16,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1617b0b1421f,46829,1733714895580 2024-12-09T03:28:16,977 INFO [RS:0;1617b0b1421f:46829 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T03:28:16,987 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1617b0b1421f,46829,1733714895580] 2024-12-09T03:28:16,998 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1617b0b1421f,46829,1733714895580 already deleted, retry=false 2024-12-09T03:28:16,998 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1617b0b1421f,46829,1733714895580 expired; onlineServers=0 2024-12-09T03:28:16,998 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1617b0b1421f,41295,1733714895409' ***** 2024-12-09T03:28:16,998 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T03:28:16,998 INFO [M:0;1617b0b1421f:41295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T03:28:16,998 INFO [M:0;1617b0b1421f:41295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T03:28:16,998 DEBUG [M:0;1617b0b1421f:41295 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T03:28:16,998 DEBUG [M:0;1617b0b1421f:41295 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T03:28:16,998 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T03:28:16,998 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714895936 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.small.0-1733714895936,5,FailOnTimeoutGroup] 2024-12-09T03:28:16,998 DEBUG [master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714895936 {}] cleaner.HFileCleaner(306): Exit Thread[master/1617b0b1421f:0:becomeActiveMaster-HFileCleaner.large.0-1733714895936,5,FailOnTimeoutGroup] 2024-12-09T03:28:16,998 INFO [M:0;1617b0b1421f:41295 {}] hbase.ChoreService(370): Chore service for: master/1617b0b1421f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T03:28:16,998 INFO [M:0;1617b0b1421f:41295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T03:28:16,999 DEBUG [M:0;1617b0b1421f:41295 {}] master.HMaster(1795): Stopping service threads 2024-12-09T03:28:16,999 INFO [M:0;1617b0b1421f:41295 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T03:28:16,999 INFO [M:0;1617b0b1421f:41295 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T03:28:16,999 INFO [M:0;1617b0b1421f:41295 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T03:28:16,999 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T03:28:17,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T03:28:17,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T03:28:17,008 DEBUG [M:0;1617b0b1421f:41295 {}] zookeeper.ZKUtil(347): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T03:28:17,008 WARN [M:0;1617b0b1421f:41295 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T03:28:17,009 INFO [M:0;1617b0b1421f:41295 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/.lastflushedseqids 2024-12-09T03:28:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741838_1014 (size=99) 2024-12-09T03:28:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741838_1014 (size=99) 2024-12-09T03:28:17,014 INFO [M:0;1617b0b1421f:41295 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T03:28:17,014 INFO [M:0;1617b0b1421f:41295 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T03:28:17,014 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T03:28:17,014 INFO [M:0;1617b0b1421f:41295 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:17,014 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:17,014 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T03:28:17,015 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T03:28:17,015 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T03:28:17,033 DEBUG [M:0;1617b0b1421f:41295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98130a2ef15c463bb6d5a63778b902b5 is 82, key is hbase:meta,,1/info:regioninfo/1733714896585/Put/seqid=0 2024-12-09T03:28:17,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741839_1015 (size=5672) 2024-12-09T03:28:17,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741839_1015 (size=5672) 2024-12-09T03:28:17,037 INFO [M:0;1617b0b1421f:41295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98130a2ef15c463bb6d5a63778b902b5 2024-12-09T03:28:17,068 DEBUG [M:0;1617b0b1421f:41295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/839e75a88e524a2daf19209609cf2e5c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733714896642/Put/seqid=0 2024-12-09T03:28:17,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741840_1016 (size=5275) 2024-12-09T03:28:17,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741840_1016 (size=5275) 2024-12-09T03:28:17,073 INFO [M:0;1617b0b1421f:41295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/839e75a88e524a2daf19209609cf2e5c 2024-12-09T03:28:17,087 INFO [RS:0;1617b0b1421f:46829 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T03:28:17,088 INFO [RS:0;1617b0b1421f:46829 {}] regionserver.HRegionServer(1031): Exiting; stopping=1617b0b1421f,46829,1733714895580; zookeeper connection closed. 
2024-12-09T03:28:17,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:17,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46829-0x100089de8d50001, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T03:28:17,088 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@774f642 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@774f642 2024-12-09T03:28:17,088 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T03:28:17,093 DEBUG [M:0;1617b0b1421f:41295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d3f9d0e9e4846b3a8a797941a4fcd5a is 69, key is 1617b0b1421f,46829,1733714895580/rs:state/1733714896036/Put/seqid=0 2024-12-09T03:28:17,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741841_1017 (size=5156) 2024-12-09T03:28:17,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741841_1017 (size=5156) 2024-12-09T03:28:17,097 INFO [M:0;1617b0b1421f:41295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d3f9d0e9e4846b3a8a797941a4fcd5a 2024-12-09T03:28:17,117 DEBUG [M:0;1617b0b1421f:41295 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85a485a4c24f4780bf56c748789e834c is 52, key is load_balancer_on/state:d/1733714896718/Put/seqid=0 2024-12-09T03:28:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741842_1018 (size=5056) 2024-12-09T03:28:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741842_1018 (size=5056) 2024-12-09T03:28:17,122 INFO [M:0;1617b0b1421f:41295 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85a485a4c24f4780bf56c748789e834c 2024-12-09T03:28:17,127 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98130a2ef15c463bb6d5a63778b902b5 as hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98130a2ef15c463bb6d5a63778b902b5 2024-12-09T03:28:17,131 INFO [M:0;1617b0b1421f:41295 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98130a2ef15c463bb6d5a63778b902b5, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T03:28:17,133 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/839e75a88e524a2daf19209609cf2e5c as hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/839e75a88e524a2daf19209609cf2e5c 2024-12-09T03:28:17,138 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/839e75a88e524a2daf19209609cf2e5c, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T03:28:17,139 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8d3f9d0e9e4846b3a8a797941a4fcd5a as hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8d3f9d0e9e4846b3a8a797941a4fcd5a 2024-12-09T03:28:17,143 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8d3f9d0e9e4846b3a8a797941a4fcd5a, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T03:28:17,144 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/85a485a4c24f4780bf56c748789e834c as hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/85a485a4c24f4780bf56c748789e834c 2024-12-09T03:28:17,149 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33195/user/jenkins/test-data/6152eac2-d544-8161-6b4d-daf802bf5769/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/85a485a4c24f4780bf56c748789e834c, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T03:28:17,150 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false 2024-12-09T03:28:17,151 INFO [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T03:28:17,151 DEBUG [M:0;1617b0b1421f:41295 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733714897014Disabling compacts and flushes for region at 1733714897014Disabling writes for close at 1733714897014Obtaining lock to block concurrent updates at 1733714897015 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733714897015Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733714897015Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733714897016 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733714897016Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733714897032 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733714897032Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733714897041 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733714897068 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733714897068Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733714897078 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733714897092 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733714897092Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733714897101 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733714897117 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733714897117Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ba09953: reopening flushed file at 1733714897126 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fa50aa4: reopening flushed file at 1733714897132 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44229054: reopening flushed file at 1733714897138 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a899739: reopening flushed file at 1733714897143 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false at 1733714897150 (+7 ms)Writing region close event to WAL at 1733714897151 (+1 ms)Closed at 1733714897151 2024-12-09T03:28:17,151 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:17,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:17,152 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:17,152 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:17,152 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T03:28:17,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35573 is added to blk_1073741830_1006 (size=10311) 2024-12-09T03:28:17,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36305 is added to blk_1073741830_1006 (size=10311) 2024-12-09T03:28:17,154 INFO [M:0;1617b0b1421f:41295 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T03:28:17,154 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T03:28:17,154 INFO [M:0;1617b0b1421f:41295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41295
2024-12-09T03:28:17,154 INFO [M:0;1617b0b1421f:41295 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T03:28:17,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T03:28:17,325 INFO [M:0;1617b0b1421f:41295 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T03:28:17,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41295-0x100089de8d50000, quorum=127.0.0.1:59403, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T03:28:17,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67700d38{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T03:28:17,327 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@714fc7b6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T03:28:17,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T03:28:17,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b6a009{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T03:28:17,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3353f5c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,STOPPED}
2024-12-09T03:28:17,329 WARN [BP-1533848234-172.17.0.3-1733714893156 heartbeating to localhost/127.0.0.1:33195 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T03:28:17,329 WARN [BP-1533848234-172.17.0.3-1733714893156 heartbeating to localhost/127.0.0.1:33195 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1533848234-172.17.0.3-1733714893156 (Datanode Uuid b4a0c22b-5aeb-4ff9-a81c-81a4dd6f49ec) service to localhost/127.0.0.1:33195
2024-12-09T03:28:17,329 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T03:28:17,329 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T03:28:17,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data3/current/BP-1533848234-172.17.0.3-1733714893156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T03:28:17,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data4/current/BP-1533848234-172.17.0.3-1733714893156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T03:28:17,330 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T03:28:17,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d7b0249{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T03:28:17,332 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71d7fc74{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T03:28:17,332 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T03:28:17,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bc56624{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T03:28:17,332 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d8843b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,STOPPED}
2024-12-09T03:28:17,333 WARN [BP-1533848234-172.17.0.3-1733714893156 heartbeating to localhost/127.0.0.1:33195 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T03:28:17,333 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T03:28:17,333 WARN [BP-1533848234-172.17.0.3-1733714893156 heartbeating to localhost/127.0.0.1:33195 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1533848234-172.17.0.3-1733714893156 (Datanode Uuid 802bea98-2a52-4677-b07e-941d6c442098) service to localhost/127.0.0.1:33195
2024-12-09T03:28:17,333 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T03:28:17,334 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data1/current/BP-1533848234-172.17.0.3-1733714893156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T03:28:17,334 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/cluster_32294c7a-65fc-0955-897c-75a3e3f30cd4/data/data2/current/BP-1533848234-172.17.0.3-1733714893156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T03:28:17,334 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T03:28:17,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d0e457e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T03:28:17,340 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b726df5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T03:28:17,340 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T03:28:17,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab0454{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T03:28:17,340 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27d58507{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a519490e-7055-0d51-8073-218e4ecf42ba/hadoop.log.dir/,STOPPED}
2024-12-09T03:28:17,346 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T03:28:17,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T03:28:17,368 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=266 (was 227)
Potentially hanging thread: nioEventLoopGroup-42-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33195
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-6
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
 java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
 java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
 java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-43-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:33195
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33195 from jenkins
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-1
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33195 from jenkins.hfs.7
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:33195
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
 app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33195
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33195
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
 java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
 app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:33195 from jenkins
 java.base@17.0.11/java.lang.Object.wait(Native Method)
 app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
 app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-2
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
 app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
 java.base@17.0.11/java.lang.Thread.sleep(Native Method)
 app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
 app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
 app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
 app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
 app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=540 (was 523) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=226 (was 202) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4923 (was 4942)