2024-11-18 18:45:23,362 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 18:45:23,375 main DEBUG Took 0.011082 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-18 18:45:23,375 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-18 18:45:23,376 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-18 18:45:23,377 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-18 18:45:23,378 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,386 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-18 18:45:23,402 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,404 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,405 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,405 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,406 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,406 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,408 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,408 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,409 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,410 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,411 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,412 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,413 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-18 18:45:23,413 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,414 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,414 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,415 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,415 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,416 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,416 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,417 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,417 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,418 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 18:45:23,419 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,419 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-18 18:45:23,421 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 18:45:23,423 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-18 18:45:23,425 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-18 18:45:23,426 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-18 18:45:23,428 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-18 18:45:23,429 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-18 18:45:23,438 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-18 18:45:23,440 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-18 18:45:23,442 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-18 18:45:23,443 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-18 18:45:23,444 main DEBUG createAppenders(={Console}) 2024-11-18 18:45:23,444 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-18 18:45:23,445 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 18:45:23,445 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-18 18:45:23,446 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-18 18:45:23,446 main DEBUG OutputStream closed 2024-11-18 18:45:23,447 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-18 18:45:23,447 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-18 18:45:23,447 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-18 18:45:23,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-18 18:45:23,551 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-18 18:45:23,553 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-18 18:45:23,554 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-18 18:45:23,555 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-18 18:45:23,556 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-18 18:45:23,557 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-18 18:45:23,557 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-18 18:45:23,558 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-18 18:45:23,559 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-18 18:45:23,559 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-18 18:45:23,560 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-18 18:45:23,560 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-18 18:45:23,561 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-18 18:45:23,561 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-18 18:45:23,562 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-18 18:45:23,562 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-18 18:45:23,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-18 18:45:23,566 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-18 18:45:23,567 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-18 18:45:23,567 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-18 18:45:23,568 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-18T18:45:23,880 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2 2024-11-18 18:45:23,883 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-18 18:45:23,884 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-18T18:45:23,893 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-18T18:45:23,927 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=338, ProcessCount=11, AvailableMemoryMB=5815 2024-11-18T18:45:23,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:45:23,950 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3, deleteOnExit=true 2024-11-18T18:45:23,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:45:23,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/test.cache.data in system properties and HBase conf 2024-11-18T18:45:23,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:45:23,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:45:23,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:45:23,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:45:23,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:45:24,048 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-18T18:45:24,132 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T18:45:24,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:45:24,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:45:24,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:45:24,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:45:24,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:45:24,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:45:24,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:45:24,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:45:24,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:45:24,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:45:24,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:45:24,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:45:24,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:45:24,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:45:24,616 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:45:25,194 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-18T18:45:25,263 INFO [Time-limited test {}] log.Log(170): Logging initialized @2651ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-18T18:45:25,330 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:45:25,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:45:25,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:45:25,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:45:25,407 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:45:25,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:45:25,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:45:25,422 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:45:25,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/java.io.tmpdir/jetty-localhost-43359-hadoop-hdfs-3_4_1-tests_jar-_-any-2667856934881886145/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:45:25,597 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:43359} 2024-11-18T18:45:25,597 INFO [Time-limited test {}] server.Server(415): Started @2986ms 2024-11-18T18:45:25,621 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:45:26,142 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:45:26,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:45:26,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:45:26,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:45:26,155 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:45:26,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:45:26,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:45:26,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f93babe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/java.io.tmpdir/jetty-localhost-40023-hadoop-hdfs-3_4_1-tests_jar-_-any-5421092617023616267/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:45:26,268 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:40023} 2024-11-18T18:45:26,268 INFO [Time-limited test {}] server.Server(415): Started @3657ms 2024-11-18T18:45:26,324 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:45:26,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:45:26,439 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:45:26,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:45:26,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:45:26,442 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:45:26,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:45:26,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:45:26,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c963ecd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/java.io.tmpdir/jetty-localhost-32773-hadoop-hdfs-3_4_1-tests_jar-_-any-3318046623493743789/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:45:26,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:32773} 2024-11-18T18:45:26,600 INFO [Time-limited test {}] server.Server(415): Started @3988ms 2024-11-18T18:45:26,606 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T18:45:27,754 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data1/current/BP-2001088950-172.17.0.2-1731955524707/current, will proceed with Du for space computation calculation, 2024-11-18T18:45:27,754 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data4/current/BP-2001088950-172.17.0.2-1731955524707/current, will proceed with Du for space computation calculation, 2024-11-18T18:45:27,754 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data3/current/BP-2001088950-172.17.0.2-1731955524707/current, will proceed with Du for space computation calculation, 2024-11-18T18:45:27,754 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data2/current/BP-2001088950-172.17.0.2-1731955524707/current, will proceed with Du for space computation calculation, 2024-11-18T18:45:27,788 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:45:27,789 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:45:27,834 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1541a366a538ea50 with lease ID 0x3481dac1d4df143f: Processing first storage report for DS-5aec451d-60cb-435c-a9e5-e94273ae4e83 from datanode DatanodeRegistration(127.0.0.1:39597, datanodeUuid=120b48cb-4ef7-4503-8f50-9711d2401e6c, infoPort=34961, infoSecurePort=0, ipcPort=34221, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707) 2024-11-18T18:45:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1541a366a538ea50 with lease ID 0x3481dac1d4df143f: from storage DS-5aec451d-60cb-435c-a9e5-e94273ae4e83 node DatanodeRegistration(127.0.0.1:39597, datanodeUuid=120b48cb-4ef7-4503-8f50-9711d2401e6c, infoPort=34961, infoSecurePort=0, ipcPort=34221, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T18:45:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0efe126659a11fe with lease ID 0x3481dac1d4df143e: Processing first storage report for DS-abc7a027-1505-4609-92b9-7f2c388ad08b from datanode DatanodeRegistration(127.0.0.1:39253, datanodeUuid=59da6ac4-74e9-4811-8809-bda8259b5bc1, infoPort=37669, infoSecurePort=0, ipcPort=39715, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707) 2024-11-18T18:45:27,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0efe126659a11fe with lease ID 0x3481dac1d4df143e: from storage DS-abc7a027-1505-4609-92b9-7f2c388ad08b node DatanodeRegistration(127.0.0.1:39253, datanodeUuid=59da6ac4-74e9-4811-8809-bda8259b5bc1, infoPort=37669, infoSecurePort=0, ipcPort=39715, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:45:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1541a366a538ea50 with lease ID 0x3481dac1d4df143f: Processing first storage report for DS-99f008c0-c1ce-4135-bd48-1ced62798dee from datanode DatanodeRegistration(127.0.0.1:39597, datanodeUuid=120b48cb-4ef7-4503-8f50-9711d2401e6c, infoPort=34961, infoSecurePort=0, ipcPort=34221, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707) 2024-11-18T18:45:27,837 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1541a366a538ea50 with lease ID 0x3481dac1d4df143f: from storage DS-99f008c0-c1ce-4135-bd48-1ced62798dee node DatanodeRegistration(127.0.0.1:39597, datanodeUuid=120b48cb-4ef7-4503-8f50-9711d2401e6c, infoPort=34961, infoSecurePort=0, ipcPort=34221, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T18:45:27,838 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0efe126659a11fe with lease ID 0x3481dac1d4df143e: Processing first storage report for DS-67df09a9-0604-4f23-b1ed-3caa6f3d5c80 from datanode DatanodeRegistration(127.0.0.1:39253, datanodeUuid=59da6ac4-74e9-4811-8809-bda8259b5bc1, infoPort=37669, infoSecurePort=0, ipcPort=39715, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707) 2024-11-18T18:45:27,838 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb0efe126659a11fe with lease ID 0x3481dac1d4df143e: from storage DS-67df09a9-0604-4f23-b1ed-3caa6f3d5c80 node DatanodeRegistration(127.0.0.1:39253, datanodeUuid=59da6ac4-74e9-4811-8809-bda8259b5bc1, infoPort=37669, infoSecurePort=0, ipcPort=39715, storageInfo=lv=-57;cid=testClusterID;nsid=1938598412;c=1731955524707), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:45:27,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2 2024-11-18T18:45:27,910 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/zookeeper_0, clientPort=50315, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:45:27,919 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50315 2024-11-18T18:45:27,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:27,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:45:28,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:45:28,569 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9 with version=8 2024-11-18T18:45:28,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:45:28,644 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-18T18:45:28,857 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:45:28,866 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:28,867 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:28,871 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:45:28,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:28,872 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:45:28,988 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:45:29,037 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-18T18:45:29,045 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-18T18:45:29,048 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:45:29,070 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 35821 (auto-detected) 2024-11-18T18:45:29,071 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-18T18:45:29,087 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34897 2024-11-18T18:45:29,105 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34897 connecting to ZooKeeper ensemble=127.0.0.1:50315 2024-11-18T18:45:29,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348970x0, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:45:29,226 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34897-0x101508dc2bb0000 connected 2024-11-18T18:45:29,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:29,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:29,353 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:45:29,356 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9, hbase.cluster.distributed=false 2024-11-18T18:45:29,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:45:29,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34897 
2024-11-18T18:45:29,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34897 2024-11-18T18:45:29,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34897 2024-11-18T18:45:29,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-18T18:45:29,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34897 2024-11-18T18:45:29,509 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:45:29,511 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:29,511 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:29,511 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:45:29,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:45:29,512 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:45:29,514 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:45:29,517 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:45:29,518 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34885 2024-11-18T18:45:29,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34885 connecting to ZooKeeper ensemble=127.0.0.1:50315 2024-11-18T18:45:29,521 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:29,525 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:29,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348850x0, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:45:29,543 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:45:29,543 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:34885-0x101508dc2bb0001 connected 2024-11-18T18:45:29,547 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:45:29,555 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:45:29,558 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:45:29,564 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:45:29,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34885 2024-11-18T18:45:29,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34885 2024-11-18T18:45:29,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34885 2024-11-18T18:45:29,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34885 2024-11-18T18:45:29,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34885 2024-11-18T18:45:29,588 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:34897 2024-11-18T18:45:29,589 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:29,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:45:29,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:45:29,602 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:29,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:29,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:45:29,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:29,635 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:45:29,637 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,34897,1731955528708 from backup master directory 2024-11-18T18:45:29,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:45:29,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:29,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:45:29,648 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:45:29,648 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:29,651 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-18T18:45:29,652 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-18T18:45:29,708 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase.id] with ID: c5c86166-2d4a-4360-8f2a-fc4d8fb290b4 2024-11-18T18:45:29,708 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/.tmp/hbase.id 2024-11-18T18:45:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:45:29,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:45:29,722 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/.tmp/hbase.id]:[hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase.id] 2024-11-18T18:45:29,769 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:29,773 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-18T18:45:29,793 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-18T18:45:29,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:29,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:29,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:45:29,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:45:29,840 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:45:29,842 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:45:29,847 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:45:29,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:45:29,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:45:29,892 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store 2024-11-18T18:45:29,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:45:29,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:45:29,920 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-18T18:45:29,924 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:29,926 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:45:29,926 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:45:29,926 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:45:29,928 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:45:29,928 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:45:29,928 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:45:29,929 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955529925Disabling compacts and flushes for region at 1731955529925Disabling writes for close at 1731955529928 (+3 ms)Writing region close event to WAL at 1731955529928Closed at 1731955529928 2024-11-18T18:45:29,932 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/.initializing 2024-11-18T18:45:29,932 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/WALs/39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:29,954 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C34897%2C1731955528708, suffix=, logDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/WALs/39fff3b0f89c,34897,1731955528708, archiveDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/oldWALs, maxLogs=10 2024-11-18T18:45:29,965 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34897%2C1731955528708.1731955529960 2024-11-18T18:45:29,986 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/WALs/39fff3b0f89c,34897,1731955528708/39fff3b0f89c%2C34897%2C1731955528708.1731955529960 2024-11-18T18:45:29,993 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:45:29,995 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:45:29,995 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:29,998 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:29,999 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:45:30,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:30,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:45:30,064 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,065 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:45:30,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,068 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:45:30,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:45:30,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,073 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:45:30,073 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:45:30,075 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,078 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,080 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,085 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,086 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,089 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:45:30,094 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:45:30,099 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:45:30,100 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726992, jitterRate=-0.07558229565620422}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:45:30,107 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955530010Initializing all the Stores at 1731955530012 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955530013 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955530013Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955530013Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955530014 (+1 ms)Cleaning up temporary data from old regions at 1731955530086 (+72 ms)Region opened successfully at 1731955530107 (+21 ms) 2024-11-18T18:45:30,109 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:45:30,140 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fd326f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:45:30,166 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:45:30,176 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:45:30,176 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:45:30,178 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:45:30,180 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T18:45:30,185 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-18T18:45:30,185 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:45:30,208 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:45:30,215 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:45:30,250 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:45:30,252 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:45:30,254 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:45:30,263 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:45:30,266 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:45:30,270 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:45:30,279 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:45:30,282 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:45:30,291 
DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:45:30,311 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:45:30,321 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:45:30,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:45:30,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:45:30,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,337 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,34897,1731955528708, sessionid=0x101508dc2bb0000, setting cluster-up flag (Was=false) 2024-11-18T18:45:30,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,391 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:45:30,394 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:30,450 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:45:30,454 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:30,467 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:45:30,473 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(746): ClusterId : c5c86166-2d4a-4360-8f2a-fc4d8fb290b4 2024-11-18T18:45:30,477 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:45:30,491 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:45:30,491 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:45:30,501 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:45:30,502 DEBUG [RS:0;39fff3b0f89c:34885 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f1c977d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:45:30,522 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:34885 2024-11-18T18:45:30,525 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:45:30,525 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:45:30,526 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T18:45:30,528 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,34897,1731955528708 with port=34885, startcode=1731955529476 2024-11-18T18:45:30,538 DEBUG [RS:0;39fff3b0f89c:34885 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:45:30,543 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:45:30,552 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:45:30,558 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-18T18:45:30,563 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,34897,1731955528708 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:45:30,569 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:45:30,569 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:45:30,569 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:45:30,569 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:45:30,570 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:45:30,570 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,570 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:45:30,570 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,575 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955560575 2024-11-18T18:45:30,575 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:45:30,576 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:45:30,577 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:45:30,578 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:45:30,581 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:45:30,582 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:45:30,582 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:45:30,582 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:45:30,584 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,584 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:45:30,583 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:30,587 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:45:30,588 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:45:30,589 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:45:30,591 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:45:30,592 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:45:30,594 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955530593,5,FailOnTimeoutGroup] 2024-11-18T18:45:30,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:45:30,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:45:30,599 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:45:30,599 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955530594,5,FailOnTimeoutGroup] 2024-11-18T18:45:30,599 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,600 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-18T18:45:30,600 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9 2024-11-18T18:45:30,601 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,601 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:30,608 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:45:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:45:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:45:30,615 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:30,615 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:45:30,618 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34897 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:45:30,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:30,623 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:45:30,625 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:45:30,626 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:30,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:45:30,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:45:30,630 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:30,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:45:30,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:45:30,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:30,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:30,635 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9 2024-11-18T18:45:30,635 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35907 2024-11-18T18:45:30,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:45:30,635 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:45:30,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740 2024-11-18T18:45:30,637 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740 2024-11-18T18:45:30,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:45:30,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:45:30,641 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:45:30,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:45:30,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:45:30,647 DEBUG [RS:0;39fff3b0f89c:34885 {}] zookeeper.ZKUtil(111): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,647 WARN [RS:0;39fff3b0f89c:34885 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T18:45:30,647 INFO [RS:0;39fff3b0f89c:34885 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:45:30,647 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,649 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:45:30,650 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,34885,1731955529476] 2024-11-18T18:45:30,650 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770683, jitterRate=-0.020026564598083496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:45:30,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955530615Initializing all the Stores at 1731955530617 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955530617Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955530618 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955530618Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955530618Cleaning up temporary data from old regions at 1731955530640 (+22 ms)Region opened successfully at 1731955530654 (+14 ms) 2024-11-18T18:45:30,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:45:30,655 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:45:30,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:45:30,655 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:45:30,655 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:45:30,657 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:45:30,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955530655Disabling compacts and flushes for region at 1731955530655Disabling writes for close at 1731955530655Writing region close event to WAL at 1731955530656 (+1 ms)Closed at 1731955530657 (+1 ms) 2024-11-18T18:45:30,661 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:45:30,661 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:45:30,666 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:45:30,672 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:45:30,674 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:45:30,676 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:45:30,688 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:45:30,692 INFO [RS:0;39fff3b0f89c:34885 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:45:30,693 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,694 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:45:30,699 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:45:30,700 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:30,700 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,700 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,701 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:45:30,702 DEBUG [RS:0;39fff3b0f89c:34885 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:45:30,703 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,703 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,704 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,704 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:30,704 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,704 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34885,1731955529476-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:45:30,720 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:45:30,722 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34885,1731955529476-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,723 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,723 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.Replication(171): 39fff3b0f89c,34885,1731955529476 started 2024-11-18T18:45:30,739 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:30,739 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,34885,1731955529476, RpcServer on 39fff3b0f89c/172.17.0.2:34885, sessionid=0x101508dc2bb0001 2024-11-18T18:45:30,740 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:45:30,740 DEBUG [RS:0;39fff3b0f89c:34885 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,741 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,34885,1731955529476' 2024-11-18T18:45:30,741 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:45:30,742 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:45:30,742 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:45:30,743 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:45:30,743 DEBUG [RS:0;39fff3b0f89c:34885 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:30,743 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,34885,1731955529476' 2024-11-18T18:45:30,743 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:45:30,744 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:45:30,744 DEBUG [RS:0;39fff3b0f89c:34885 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:45:30,744 INFO [RS:0;39fff3b0f89c:34885 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:45:30,744 INFO [RS:0;39fff3b0f89c:34885 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T18:45:30,827 WARN [39fff3b0f89c:34897 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:45:30,851 INFO [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C34885%2C1731955529476, suffix=, logDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476, archiveDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs, maxLogs=32 2024-11-18T18:45:30,854 INFO [RS:0;39fff3b0f89c:34885 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955530854 2024-11-18T18:45:30,864 INFO [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955530854 2024-11-18T18:45:30,868 DEBUG [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:45:31,082 DEBUG [39fff3b0f89c:34897 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:45:31,096 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:31,102 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,34885,1731955529476, state=OPENING 2024-11-18T18:45:31,141 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:45:31,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:31,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:45:31,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:45:31,151 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:45:31,153 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:45:31,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,34885,1731955529476}] 2024-11-18T18:45:31,334 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:45:31,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37755, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:45:31,352 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:45:31,353 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:45:31,357 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C34885%2C1731955529476.meta, suffix=.meta, logDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476, archiveDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs, maxLogs=32 2024-11-18T18:45:31,359 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.meta.1731955531358.meta 2024-11-18T18:45:31,369 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.meta.1731955531358.meta 2024-11-18T18:45:31,371 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:45:31,373 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:45:31,375 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:45:31,378 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:45:31,382 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:45:31,386 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:45:31,387 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:31,388 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:45:31,388 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:45:31,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:45:31,393 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:45:31,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:31,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:31,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:45:31,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:45:31,398 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:31,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:31,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:45:31,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:45:31,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:31,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:45:31,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:45:31,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:45:31,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:31,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T18:45:31,406 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:45:31,408 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740 2024-11-18T18:45:31,411 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740 2024-11-18T18:45:31,414 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:45:31,415 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:45:31,416 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:45:31,420 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:45:31,422 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882947, jitterRate=0.12272551655769348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:45:31,422 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:45:31,424 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955531388Writing region info on filesystem at 1731955531388Initializing all the Stores at 1731955531390 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955531391 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955531391Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955531391Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955531391Cleaning up temporary data from old regions at 1731955531415 (+24 ms)Running coprocessor post-open hooks at 1731955531423 (+8 ms)Region opened successfully at 1731955531424 (+1 ms) 2024-11-18T18:45:31,433 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955531325 2024-11-18T18:45:31,448 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:45:31,449 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:45:31,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:31,455 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,34885,1731955529476, state=OPEN 2024-11-18T18:45:31,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:45:31,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:45:31,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:45:31,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:45:31,505 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:31,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:45:31,512 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,34885,1731955529476 in 349 msec 2024-11-18T18:45:31,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:45:31,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 847 msec 2024-11-18T18:45:31,521 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:45:31,521 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:45:31,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:45:31,540 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,34885,1731955529476, seqNum=-1] 2024-11-18T18:45:31,559 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:45:31,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42601, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:45:31,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0850 sec 2024-11-18T18:45:31,587 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955531587, completionTime=-1 2024-11-18T18:45:31,590 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:45:31,590 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:45:31,623 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:45:31,623 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955591623 2024-11-18T18:45:31,623 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955651623 2024-11-18T18:45:31,623 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-11-18T18:45:31,637 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:31,638 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:31,638 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:31,640 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:34897, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:31,640 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:31,643 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:45:31,673 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:45:31,699 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.051sec 2024-11-18T18:45:31,700 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:45:31,702 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:45:31,703 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:45:31,704 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:45:31,704 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:45:31,705 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:45:31,706 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:45:31,714 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:45:31,716 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:45:31,716 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,34897,1731955528708-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:45:31,784 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4c629c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:45:31,786 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-18T18:45:31,786 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-18T18:45:31,790 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,34897,-1 for getting cluster id 2024-11-18T18:45:31,793 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:45:31,802 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c5c86166-2d4a-4360-8f2a-fc4d8fb290b4' 2024-11-18T18:45:31,804 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:45:31,804 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c5c86166-2d4a-4360-8f2a-fc4d8fb290b4" 2024-11-18T18:45:31,805 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b94835f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:45:31,805 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,34897,-1] 2024-11-18T18:45:31,808 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:45:31,810 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:45:31,811 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:45:31,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5286c427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:45:31,815 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:45:31,822 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,34885,1731955529476, seqNum=-1] 2024-11-18T18:45:31,822 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:45:31,824 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:45:31,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:31,844 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:45:31,851 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:45:31,855 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T18:45:31,860 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 39fff3b0f89c,34897,1731955528708 2024-11-18T18:45:31,862 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1138b495 2024-11-18T18:45:31,863 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T18:45:31,866 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47386, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T18:45:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T18:45:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T18:45:31,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:45:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-18T18:45:31,881 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T18:45:31,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-18T18:45:31,884 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:31,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T18:45:31,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:45:31,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741835_1011 (size=389) 2024-11-18T18:45:31,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741835_1011 (size=389) 2024-11-18T18:45:31,929 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 806fa05c18d1748062cac13a4cda41b0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9 2024-11-18T18:45:31,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741836_1012 (size=72) 2024-11-18T18:45:31,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741836_1012 (size=72) 2024-11-18T18:45:31,940 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:31,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 806fa05c18d1748062cac13a4cda41b0, disabling compactions & flushes 2024-11-18T18:45:31,940 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:31,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:31,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. after waiting 0 ms 2024-11-18T18:45:31,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:31,940 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:31,940 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 806fa05c18d1748062cac13a4cda41b0: Waiting for close lock at 1731955531940Disabling compacts and flushes for region at 1731955531940Disabling writes for close at 1731955531940Writing region close event to WAL at 1731955531940Closed at 1731955531940 2024-11-18T18:45:31,942 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T18:45:31,949 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731955531942"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955531942"}]},"ts":"1731955531942"} 2024-11-18T18:45:31,954 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T18:45:31,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T18:45:31,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955531956"}]},"ts":"1731955531956"} 2024-11-18T18:45:31,963 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-18T18:45:31,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=806fa05c18d1748062cac13a4cda41b0, ASSIGN}] 2024-11-18T18:45:31,967 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=806fa05c18d1748062cac13a4cda41b0, ASSIGN 2024-11-18T18:45:31,969 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=806fa05c18d1748062cac13a4cda41b0, ASSIGN; state=OFFLINE, location=39fff3b0f89c,34885,1731955529476; forceNewPlan=false, retain=false 2024-11-18T18:45:32,121 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=806fa05c18d1748062cac13a4cda41b0, regionState=OPENING, regionLocation=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:32,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=806fa05c18d1748062cac13a4cda41b0, ASSIGN because future has completed 2024-11-18T18:45:32,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 806fa05c18d1748062cac13a4cda41b0, server=39fff3b0f89c,34885,1731955529476}] 2024-11-18T18:45:32,290 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 
2024-11-18T18:45:32,290 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 806fa05c18d1748062cac13a4cda41b0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:45:32,291 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,291 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:45:32,291 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,291 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,293 INFO [StoreOpener-806fa05c18d1748062cac13a4cda41b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,296 INFO [StoreOpener-806fa05c18d1748062cac13a4cda41b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 806fa05c18d1748062cac13a4cda41b0 columnFamilyName info 2024-11-18T18:45:32,296 DEBUG [StoreOpener-806fa05c18d1748062cac13a4cda41b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:45:32,297 INFO [StoreOpener-806fa05c18d1748062cac13a4cda41b0-1 {}] regionserver.HStore(327): Store=806fa05c18d1748062cac13a4cda41b0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:45:32,297 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,299 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,299 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,300 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,300 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,303 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,307 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:45:32,307 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 806fa05c18d1748062cac13a4cda41b0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808736, jitterRate=0.028361618518829346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:45:32,308 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:32,309 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 806fa05c18d1748062cac13a4cda41b0: Running coprocessor pre-open hook at 1731955532291Writing region info on filesystem at 1731955532291Initializing all the Stores at 1731955532293 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955532293Cleaning up temporary data from old regions at 1731955532300 (+7 ms)Running coprocessor post-open hooks at 1731955532308 (+8 ms)Region opened successfully at 1731955532308 2024-11-18T18:45:32,311 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0., pid=6, masterSystemTime=1731955532280 2024-11-18T18:45:32,315 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:32,315 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:45:32,316 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=806fa05c18d1748062cac13a4cda41b0, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,34885,1731955529476 2024-11-18T18:45:32,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 806fa05c18d1748062cac13a4cda41b0, server=39fff3b0f89c,34885,1731955529476 because future has completed 2024-11-18T18:45:32,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T18:45:32,326 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 806fa05c18d1748062cac13a4cda41b0, server=39fff3b0f89c,34885,1731955529476 in 196 msec 2024-11-18T18:45:32,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T18:45:32,331 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=806fa05c18d1748062cac13a4cda41b0, ASSIGN in 361 msec 2024-11-18T18:45:32,333 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T18:45:32,333 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955532333"}]},"ts":"1731955532333"} 2024-11-18T18:45:32,336 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-18T18:45:32,338 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T18:45:32,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 464 msec 2024-11-18T18:45:36,901 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-18T18:45:36,963 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T18:45:36,965 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-18T18:45:39,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:45:39,035 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T18:45:39,037 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T18:45:39,037 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T18:45:39,038 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:45:39,038 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T18:45:39,038 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T18:45:39,038 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T18:45:41,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:45:41,911 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-18T18:45:41,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-18T18:45:41,922 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-18T18:45:41,923 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 
2024-11-18T18:45:41,924 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955541924 2024-11-18T18:45:41,935 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:45:41,935 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:45:41,935 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:45:41,935 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:45:41,935 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:45:41,936 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955530854 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955541924 2024-11-18T18:45:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741833_1009 (size=451) 2024-11-18T18:45:41,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741833_1009 (size=451) 2024-11-18T18:45:41,947 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:45:41,948 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955530854 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955530854 2024-11-18T18:45:41,957 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0., hostname=39fff3b0f89c,34885,1731955529476, seqNum=2] 2024-11-18T18:45:53,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34885 {}] regionserver.HRegion(8855): Flush requested on 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:45:53,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 806fa05c18d1748062cac13a4cda41b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:45:54,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/45c16a642f2c492c8b68d10a7e9ad263 is 1080, key is row0001/info:/1731955541961/Put/seqid=0 2024-11-18T18:45:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741838_1014 (size=12509) 2024-11-18T18:45:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741838_1014 (size=12509) 2024-11-18T18:45:54,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/45c16a642f2c492c8b68d10a7e9ad263 2024-11-18T18:45:54,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/45c16a642f2c492c8b68d10a7e9ad263 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263 2024-11-18T18:45:54,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T18:45:54,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 550ms, sequenceid=11, compaction requested=false 2024-11-18T18:45:54,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 806fa05c18d1748062cac13a4cda41b0: 2024-11-18T18:45:57,838 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T18:46:02,010 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955562010 2024-11-18T18:46:02,219 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:02,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:02,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:02,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:02,220 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:02,220 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:02,220 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955541924 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955562010 2024-11-18T18:46:02,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:46:02,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955541924 is not closed yet, will try archiving it next time 2024-11-18T18:46:02,223 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741837_1013 (size=12399) 2024-11-18T18:46:02,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741837_1013 (size=12399) 2024-11-18T18:46:02,425 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:04,629 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:06,834 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:09,039 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:09,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34885 {}] regionserver.HRegion(8855): Flush requested on 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:46:09,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 806fa05c18d1748062cac13a4cda41b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:46:09,242 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:09,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/81eca54529eb4505a191d1298978bef1 is 1080, key is row0008/info:/1731955555998/Put/seqid=0 2024-11-18T18:46:09,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741840_1016 (size=12509) 2024-11-18T18:46:09,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741840_1016 (size=12509) 2024-11-18T18:46:09,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/81eca54529eb4505a191d1298978bef1 2024-11-18T18:46:09,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/81eca54529eb4505a191d1298978bef1 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1 2024-11-18T18:46:09,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1, entries=7, sequenceid=21, filesize=12.2 K 2024-11-18T18:46:09,490 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:09,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 450ms, sequenceid=21, compaction requested=false 2024-11-18T18:46:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 806fa05c18d1748062cac13a4cda41b0: 2024-11-18T18:46:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-18T18:46:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:09,491 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263 because midkey is the same as first or last row 2024-11-18T18:46:11,244 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:11,723 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T18:46:11,723 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-18T18:46:13,449 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:13,452 WARN [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:13,453 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C34885%2C1731955529476:(num 1731955562010) roll requested 2024-11-18T18:46:13,454 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955573454 2024-11-18T18:46:13,664 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:13,664 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:13,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:13,665 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:13,665 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:13,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:13,665 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955562010 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955573454 2024-11-18T18:46:13,666 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34961:34961),(127.0.0.1/127.0.0.1:37669:37669)] 2024-11-18T18:46:13,667 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955562010 is not closed yet, will try archiving it next time 2024-11-18T18:46:13,667 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955541924 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955541924 2024-11-18T18:46:13,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741839_1015 (size=7739) 2024-11-18T18:46:13,669 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741839_1015 (size=7739) 2024-11-18T18:46:15,653 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:17,291 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 806fa05c18d1748062cac13a4cda41b0, had cached 0 bytes from a total of 25018 2024-11-18T18:46:17,858 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:20,062 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:22,267 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:24,269 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T18:46:24,270 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955584270 2024-11-18T18:46:27,839 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-18T18:46:29,283 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:29,286 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:29,286 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C34885%2C1731955529476:(num 1731955584270) roll requested 2024-11-18T18:46:29,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:29,287 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:29,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:29,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:29,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:29,287 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955573454 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955584270 2024-11-18T18:46:29,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741841_1017 (size=4753) 2024-11-18T18:46:29,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741841_1017 (size=4753) 2024-11-18T18:46:29,294 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37669:37669),(127.0.0.1/127.0.0.1:34961:34961)] 2024-11-18T18:46:29,294 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955573454 is not closed yet, will try archiving it next time 2024-11-18T18:46:29,294 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955589294 2024-11-18T18:46:34,298 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:34,298 WARN [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], 
DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:34,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34885 {}] regionserver.HRegion(8855): Flush requested on 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:46:34,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 806fa05c18d1748062cac13a4cda41b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:46:34,308 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:34,308 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:36,300 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T18:46:39,302 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:39,302 WARN [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK], DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK]] 2024-11-18T18:46:39,302 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:39,302 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:39,302 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:39,303 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:39,303 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:39,303 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955584270 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955589294 2024-11-18T18:46:39,304 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34961:34961),(127.0.0.1/127.0.0.1:37669:37669)] 2024-11-18T18:46:39,304 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955584270 is not closed yet, will try archiving it next time 2024-11-18T18:46:39,304 
DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C34885%2C1731955529476:(num 1731955589294) roll requested 2024-11-18T18:46:39,305 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955599304 2024-11-18T18:46:39,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741842_1018 (size=1569) 2024-11-18T18:46:39,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741842_1018 (size=1569) 2024-11-18T18:46:39,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/13b9e4d8ed624d439047660a88a1101f is 1080, key is row0015/info:/1731955571042/Put/seqid=0 2024-11-18T18:46:39,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741844_1020 (size=12509) 2024-11-18T18:46:39,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741844_1020 (size=12509) 2024-11-18T18:46:39,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/13b9e4d8ed624d439047660a88a1101f 2024-11-18T18:46:39,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/13b9e4d8ed624d439047660a88a1101f as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f 2024-11-18T18:46:39,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f, entries=7, sequenceid=31, filesize=12.2 K 2024-11-18T18:46:44,315 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:44,315 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:44,351 INFO [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:44,351 WARN [FSHLog-0-hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9-prefix:39fff3b0f89c,34885,1731955529476 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39597,DS-5aec451d-60cb-435c-a9e5-e94273ae4e83,DISK], DatanodeInfoWithStorage[127.0.0.1:39253,DS-abc7a027-1505-4609-92b9-7f2c388ad08b,DISK]] 2024-11-18T18:46:44,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 10053ms, sequenceid=31, compaction requested=true 2024-11-18T18:46:44,352 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 806fa05c18d1748062cac13a4cda41b0: 2024-11-18T18:46:44,352 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,352 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,352 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-18T18:46:44,352 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:44,352 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,352 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,352 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263 because midkey is the same as first or last row 2024-11-18T18:46:44,352 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955589294 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955599304 2024-11-18T18:46:44,354 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34961:34961),(127.0.0.1/127.0.0.1:37669:37669)] 2024-11-18T18:46:44,354 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955589294 is not closed yet, will try archiving it next time 2024-11-18T18:46:44,354 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955562010 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955562010 
2024-11-18T18:46:44,354 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C34885%2C1731955529476:(num 1731955604354) roll requested 2024-11-18T18:46:44,355 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C34885%2C1731955529476.1731955604354 2024-11-18T18:46:44,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741843_1019 (size=438) 2024-11-18T18:46:44,355 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 806fa05c18d1748062cac13a4cda41b0:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:46:44,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741843_1019 (size=438) 2024-11-18T18:46:44,357 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955573454 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955573454 2024-11-18T18:46:44,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:46:44,359 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:46:44,359 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955584270 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955584270 2024-11-18T18:46:44,361 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955589294 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955589294 2024-11-18T18:46:44,363 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:46:44,364 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HStore(1541): 806fa05c18d1748062cac13a4cda41b0/info is initiating minor compaction (all files) 2024-11-18T18:46:44,365 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,365 INFO [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 806fa05c18d1748062cac13a4cda41b0/info in TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 
2024-11-18T18:46:44,365 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,365 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,365 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,365 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,365 INFO [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f] into tmpdir=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp, totalSize=36.6 K 2024-11-18T18:46:44,365 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955599304 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955604354 2024-11-18T18:46:44,367 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] compactions.Compactor(225): Compacting 45c16a642f2c492c8b68d10a7e9ad263, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731955541961 2024-11-18T18:46:44,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741845_1021 (size=93) 2024-11-18T18:46:44,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741845_1021 (size=93) 2024-11-18T18:46:44,368 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81eca54529eb4505a191d1298978bef1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731955555998 2024-11-18T18:46:44,369 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955599304 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs/39fff3b0f89c%2C34885%2C1731955529476.1731955599304 2024-11-18T18:46:44,369 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] compactions.Compactor(225): Compacting 13b9e4d8ed624d439047660a88a1101f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731955571042 2024-11-18T18:46:44,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34961:34961),(127.0.0.1/127.0.0.1:37669:37669)] 2024-11-18T18:46:44,388 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
39fff3b0f89c%2C34885%2C1731955529476.1731955604388 2024-11-18T18:46:44,403 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,403 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,403 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,404 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:46:44,404 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955604354 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955604388 2024-11-18T18:46:44,405 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34961:34961),(127.0.0.1/127.0.0.1:37669:37669)] 2024-11-18T18:46:44,405 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/WALs/39fff3b0f89c,34885,1731955529476/39fff3b0f89c%2C34885%2C1731955529476.1731955604354 is not closed yet, will try archiving it next time 2024-11-18T18:46:44,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741846_1022 (size=1258) 2024-11-18T18:46:44,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741846_1022 (size=1258) 2024-11-18T18:46:44,414 INFO [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 806fa05c18d1748062cac13a4cda41b0#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:46:44,415 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/b4ba4ff4542b4116b5f08a7c184c52b7 is 1080, key is row0001/info:/1731955541961/Put/seqid=0 2024-11-18T18:46:44,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741848_1024 (size=27710) 2024-11-18T18:46:44,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741848_1024 (size=27710) 2024-11-18T18:46:44,436 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/b4ba4ff4542b4116b5f08a7c184c52b7 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/b4ba4ff4542b4116b5f08a7c184c52b7 2024-11-18T18:46:44,455 INFO [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 806fa05c18d1748062cac13a4cda41b0/info of 806fa05c18d1748062cac13a4cda41b0 into b4ba4ff4542b4116b5f08a7c184c52b7(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:46:44,456 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 806fa05c18d1748062cac13a4cda41b0: 2024-11-18T18:46:44,459 INFO [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0., storeName=806fa05c18d1748062cac13a4cda41b0/info, priority=13, startTime=1731955604354; duration=0sec 2024-11-18T18:46:44,459 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T18:46:44,459 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:44,459 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/b4ba4ff4542b4116b5f08a7c184c52b7 because midkey is the same as first or last row 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/b4ba4ff4542b4116b5f08a7c184c52b7 because midkey is the same as first or last row 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/b4ba4ff4542b4116b5f08a7c184c52b7 because midkey is the same as first or last row 2024-11-18T18:46:44,460 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:46:44,461 DEBUG [RS:0;39fff3b0f89c:34885-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 806fa05c18d1748062cac13a4cda41b0:info 2024-11-18T18:46:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34885 {}] regionserver.HRegion(8855): Flush requested on 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:46:56,417 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 806fa05c18d1748062cac13a4cda41b0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:46:56,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/0af48cfaa4494649ae26e36471d96c23 is 1080, key is row0022/info:/1731955604390/Put/seqid=0 2024-11-18T18:46:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741849_1025 (size=12509) 2024-11-18T18:46:56,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741849_1025 (size=12509) 2024-11-18T18:46:56,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/0af48cfaa4494649ae26e36471d96c23 2024-11-18T18:46:56,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/0af48cfaa4494649ae26e36471d96c23 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/0af48cfaa4494649ae26e36471d96c23 2024-11-18T18:46:56,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/0af48cfaa4494649ae26e36471d96c23, entries=7, sequenceid=42, filesize=12.2 K 2024-11-18T18:46:56,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 48ms, sequenceid=42, compaction requested=false 2024-11-18T18:46:56,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 806fa05c18d1748062cac13a4cda41b0: 2024-11-18T18:46:56,465 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-18T18:46:56,465 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:46:56,466 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/b4ba4ff4542b4116b5f08a7c184c52b7 because midkey is the same as first or last row 2024-11-18T18:46:57,839 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T18:47:02,291 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 806fa05c18d1748062cac13a4cda41b0, had cached 0 bytes from a total of 40219 2024-11-18T18:47:04,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:47:04,434 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:47:04,434 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:04,440 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:04,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:04,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T18:47:04,442 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:47:04,442 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=372944084, stopped=false 2024-11-18T18:47:04,442 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,34897,1731955528708 2024-11-18T18:47:04,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:04,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:04,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:04,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:04,487 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:04,487 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:47:04,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:04,488 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:04,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:04,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:04,489 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,34885,1731955529476' ***** 2024-11-18T18:47:04,489 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:47:04,489 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:47:04,490 INFO [RS:0;39fff3b0f89c:34885 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:47:04,490 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:47:04,490 INFO [RS:0;39fff3b0f89c:34885 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:47:04,490 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(3091): Received CLOSE for 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:47:04,491 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,34885,1731955529476 2024-11-18T18:47:04,491 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:04,491 INFO [RS:0;39fff3b0f89c:34885 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:34885. 
2024-11-18T18:47:04,491 DEBUG [RS:0;39fff3b0f89c:34885 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:04,492 DEBUG [RS:0;39fff3b0f89c:34885 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:04,492 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 806fa05c18d1748062cac13a4cda41b0, disabling compactions & flushes 2024-11-18T18:47:04,492 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:47:04,492 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:47:04,492 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:47:04,492 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:47:04,492 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T18:47:04,492 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. after waiting 0 ms 2024-11-18T18:47:04,492 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 
2024-11-18T18:47:04,492 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:47:04,493 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 806fa05c18d1748062cac13a4cda41b0 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T18:47:04,493 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T18:47:04,493 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:47:04,493 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 806fa05c18d1748062cac13a4cda41b0=TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.} 2024-11-18T18:47:04,493 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:47:04,493 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:47:04,493 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:47:04,493 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:47:04,493 DEBUG [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 806fa05c18d1748062cac13a4cda41b0 2024-11-18T18:47:04,493 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-18T18:47:04,500 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/e4700894784a409a9b1850aa3544a979 is 1080, key is row0029/info:/1731955618420/Put/seqid=0 2024-11-18T18:47:04,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741850_1026 (size=8193) 2024-11-18T18:47:04,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741850_1026 (size=8193) 2024-11-18T18:47:04,519 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/info/c4b8a34ad74745b49e4b5d0274035993 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0./info:regioninfo/1731955532316/Put/seqid=0 2024-11-18T18:47:04,519 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/e4700894784a409a9b1850aa3544a979 2024-11-18T18:47:04,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741851_1027 (size=7016) 2024-11-18T18:47:04,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741851_1027 (size=7016) 2024-11-18T18:47:04,534 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/info/c4b8a34ad74745b49e4b5d0274035993 2024-11-18T18:47:04,538 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/.tmp/info/e4700894784a409a9b1850aa3544a979 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/e4700894784a409a9b1850aa3544a979 2024-11-18T18:47:04,552 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/e4700894784a409a9b1850aa3544a979, entries=3, sequenceid=48, filesize=8.0 K 2024-11-18T18:47:04,554 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 62ms, sequenceid=48, compaction requested=true 2024-11-18T18:47:04,555 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f] to archive 2024-11-18T18:47:04,559 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
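The flush entries above follow a write-to-.tmp-then-commit pattern: DefaultStoreFlusher writes the new HFile under the region's .tmp directory, and HRegionFileSystem then "commits" it by moving it into the live store directory (info/). A minimal sketch of that idiom with the plain Hadoop FileSystem API; this is not HBase's HRegionFileSystem code, just the underlying pattern, and the paths are placeholders.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // would point at an hdfs:// namenode in a real cluster
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/data/default/t1/region/.tmp/info/newfile");  // placeholder paths
    Path dst = new Path("/data/default/t1/region/info/newfile");

    // 1. Write the complete file under .tmp so readers never observe a partial file.
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("cell data would go here");
    }

    // 2. Commit by renaming into the store directory; rename is atomic within an HDFS namespace.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed for " + tmp);
    }
  }
}
```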
2024-11-18T18:47:04,563 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/45c16a642f2c492c8b68d10a7e9ad263 2024-11-18T18:47:04,564 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/ns/5276d620451e44579207a39579bc4bd4 is 43, key is default/ns:d/1731955531567/Put/seqid=0 2024-11-18T18:47:04,566 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1 to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/81eca54529eb4505a191d1298978bef1 2024-11-18T18:47:04,568 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/archive/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/info/13b9e4d8ed624d439047660a88a1101f 2024-11-18T18:47:04,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741852_1028 (size=5153) 2024-11-18T18:47:04,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741852_1028 (size=5153) 2024-11-18T18:47:04,575 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/ns/5276d620451e44579207a39579bc4bd4 2024-11-18T18:47:04,584 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39fff3b0f89c:34897 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-18T18:47:04,590 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [45c16a642f2c492c8b68d10a7e9ad263=12509, 81eca54529eb4505a191d1298978bef1=12509, 13b9e4d8ed624d439047660a88a1101f=12509] 2024-11-18T18:47:04,596 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/default/TestLogRolling-testSlowSyncLogRolling/806fa05c18d1748062cac13a4cda41b0/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-18T18:47:04,599 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 2024-11-18T18:47:04,599 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 806fa05c18d1748062cac13a4cda41b0: Waiting for close lock at 1731955624491Running coprocessor pre-close hooks at 1731955624492 (+1 ms)Disabling compacts and flushes for region at 1731955624492Disabling writes for close at 1731955624492Obtaining lock to block concurrent updates at 1731955624493 (+1 ms)Preparing flush snapshotting stores in 806fa05c18d1748062cac13a4cda41b0 at 1731955624493Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731955624493Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. at 1731955624494 (+1 ms)Flushing 806fa05c18d1748062cac13a4cda41b0/info: creating writer at 1731955624494Flushing 806fa05c18d1748062cac13a4cda41b0/info: appending metadata at 1731955624499 (+5 ms)Flushing 806fa05c18d1748062cac13a4cda41b0/info: closing flushed file at 1731955624499Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1303e868: reopening flushed file at 1731955624536 (+37 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 806fa05c18d1748062cac13a4cda41b0 in 62ms, sequenceid=48, compaction requested=true at 1731955624555 (+19 ms)Writing region close event to WAL at 1731955624591 (+36 ms)Running coprocessor post-close hooks at 1731955624597 (+6 ms)Closed at 1731955624599 (+2 ms) 2024-11-18T18:47:04,600 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731955531867.806fa05c18d1748062cac13a4cda41b0. 
2024-11-18T18:47:04,605 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/table/44736d1094054f21aaa6a84961e9291e is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731955532333/Put/seqid=0 2024-11-18T18:47:04,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741853_1029 (size=5396) 2024-11-18T18:47:04,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741853_1029 (size=5396) 2024-11-18T18:47:04,612 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/table/44736d1094054f21aaa6a84961e9291e 2024-11-18T18:47:04,622 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/info/c4b8a34ad74745b49e4b5d0274035993 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/info/c4b8a34ad74745b49e4b5d0274035993 2024-11-18T18:47:04,632 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/info/c4b8a34ad74745b49e4b5d0274035993, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T18:47:04,633 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/ns/5276d620451e44579207a39579bc4bd4 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/ns/5276d620451e44579207a39579bc4bd4 2024-11-18T18:47:04,642 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/ns/5276d620451e44579207a39579bc4bd4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T18:47:04,644 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/.tmp/table/44736d1094054f21aaa6a84961e9291e as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/table/44736d1094054f21aaa6a84961e9291e 2024-11-18T18:47:04,654 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/table/44736d1094054f21aaa6a84961e9291e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T18:47:04,656 INFO 
[RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false 2024-11-18T18:47:04,663 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T18:47:04,664 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:47:04,665 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:04,665 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955624493Running coprocessor pre-close hooks at 1731955624493Disabling compacts and flushes for region at 1731955624493Disabling writes for close at 1731955624493Obtaining lock to block concurrent updates at 1731955624493Preparing flush snapshotting stores in 1588230740 at 1731955624493Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731955624494 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731955624495 (+1 ms)Flushing 1588230740/info: creating writer at 1731955624495Flushing 1588230740/info: appending metadata at 1731955624517 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731955624517Flushing 1588230740/ns: creating writer at 1731955624544 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731955624563 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731955624563Flushing 1588230740/table: creating writer at 1731955624585 (+22 ms)Flushing 1588230740/table: appending metadata at 1731955624604 (+19 ms)Flushing 1588230740/table: closing flushed file at 1731955624604Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@385412e3: reopening flushed file at 1731955624621 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57484b6: reopening flushed file at 1731955624632 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cb687e9: reopening flushed file at 1731955624643 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 163ms, sequenceid=11, compaction requested=false at 1731955624656 (+13 ms)Writing region close event to WAL at 1731955624657 (+1 ms)Running coprocessor post-close hooks at 1731955624664 (+7 ms)Closed at 1731955624665 (+1 ms) 2024-11-18T18:47:04,665 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:04,694 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,34885,1731955529476; all regions closed. 
2024-11-18T18:47:04,695 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:04,695 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:04,696 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:04,696 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:04,696 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:04,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741834_1010 (size=3066) 2024-11-18T18:47:04,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741834_1010 (size=3066) 2024-11-18T18:47:04,705 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:47:04,705 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:47:04,708 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:05,103 DEBUG [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs 2024-11-18T18:47:05,103 INFO [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C34885%2C1731955529476.meta:.meta(num 1731955531358) 2024-11-18T18:47:05,104 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,104 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741847_1023 (size=12695) 2024-11-18T18:47:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741847_1023 (size=12695) 2024-11-18T18:47:05,110 DEBUG [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/oldWALs 2024-11-18T18:47:05,111 INFO [RS:0;39fff3b0f89c:34885 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C34885%2C1731955529476:(num 1731955604388) 2024-11-18T18:47:05,111 DEBUG [RS:0;39fff3b0f89c:34885 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:05,111 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:05,111 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:05,111 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:05,111 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:05,111 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T18:47:05,112 INFO [RS:0;39fff3b0f89c:34885 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34885 2024-11-18T18:47:05,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,34885,1731955529476 2024-11-18T18:47:05,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:05,167 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:05,168 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,34885,1731955529476] 2024-11-18T18:47:05,203 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,34885,1731955529476 already deleted, retry=false 2024-11-18T18:47:05,203 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,34885,1731955529476 expired; onlineServers=0 2024-11-18T18:47:05,204 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,34897,1731955528708' ***** 2024-11-18T18:47:05,204 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:47:05,204 INFO [M:0;39fff3b0f89c:34897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:05,204 INFO [M:0;39fff3b0f89c:34897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:05,204 DEBUG [M:0;39fff3b0f89c:34897 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:47:05,205 DEBUG [M:0;39fff3b0f89c:34897 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:47:05,205 INFO [M:0;39fff3b0f89c:34897 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:05,205 INFO [M:0;39fff3b0f89c:34897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:05,205 DEBUG [M:0;39fff3b0f89c:34897 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:47:05,206 INFO [M:0;39fff3b0f89c:34897 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:47:05,206 INFO [M:0;39fff3b0f89c:34897 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:05,207 INFO [M:0;39fff3b0f89c:34897 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:47:05,208 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-18T18:47:05,210 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
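The entries above show the coordination side of shutdown: the region server's ephemeral znode under /hbase/rs disappears, the master's ZKWatcher sees the NodeDeleted/NodeChildrenChanged events, and RegionServerTracker treats that as server expiration. A minimal sketch of the same watch-for-deletion idiom with the plain ZooKeeper client API; the connect string and znode path are placeholders, and this is not HBase's ZKWatcher implementation.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchForDeletion {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // Fired for session events and for the watch registered below.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/rs/example-server".equals(event.getPath())) {   // placeholder znode
        deleted.countDown();
      }
    });

    // exists(path, true) registers a one-shot watch that fires when the node is created or deleted.
    zk.exists("/hbase/rs/example-server", true);

    deleted.await();   // returns once the ephemeral node goes away, e.g. because its owner stopped
    zk.close();
  }
}
```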
2024-11-18T18:47:05,210 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955530594 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955530594,5,FailOnTimeoutGroup] 2024-11-18T18:47:05,210 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955530593 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955530593,5,FailOnTimeoutGroup] 2024-11-18T18:47:05,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:47:05,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:05,228 DEBUG [M:0;39fff3b0f89c:34897 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T18:47:05,228 DEBUG [M:0;39fff3b0f89c:34897 {}] master.ActiveMasterManager(353): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T18:47:05,229 INFO [M:0;39fff3b0f89c:34897 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/.lastflushedseqids 2024-11-18T18:47:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741854_1030 (size=130) 2024-11-18T18:47:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741854_1030 (size=130) 2024-11-18T18:47:05,245 INFO [M:0;39fff3b0f89c:34897 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:47:05,245 INFO [M:0;39fff3b0f89c:34897 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:47:05,246 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:47:05,246 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:05,246 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:05,246 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:47:05,246 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:47:05,246 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-18T18:47:05,267 DEBUG [M:0;39fff3b0f89c:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14ac8c81d5724a5f99465a62a5d4c6dd is 82, key is hbase:meta,,1/info:regioninfo/1731955531450/Put/seqid=0 2024-11-18T18:47:05,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741855_1031 (size=5672) 2024-11-18T18:47:05,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741855_1031 (size=5672) 2024-11-18T18:47:05,274 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14ac8c81d5724a5f99465a62a5d4c6dd 2024-11-18T18:47:05,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:05,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34885-0x101508dc2bb0001, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:05,292 INFO [RS:0;39fff3b0f89c:34885 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:05,292 INFO [RS:0;39fff3b0f89c:34885 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,34885,1731955529476; zookeeper connection closed. 
2024-11-18T18:47:05,292 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a358960 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a358960 2024-11-18T18:47:05,293 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:47:05,301 DEBUG [M:0;39fff3b0f89c:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6464e45d75f84005a80fe58dae3a152a is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731955532341/Put/seqid=0 2024-11-18T18:47:05,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741856_1032 (size=6248) 2024-11-18T18:47:05,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741856_1032 (size=6248) 2024-11-18T18:47:05,312 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6464e45d75f84005a80fe58dae3a152a 2024-11-18T18:47:05,319 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6464e45d75f84005a80fe58dae3a152a 2024-11-18T18:47:05,341 DEBUG [M:0;39fff3b0f89c:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c702d3a2642d434cb3013b95b856d730 is 69, key is 39fff3b0f89c,34885,1731955529476/rs:state/1731955530621/Put/seqid=0 2024-11-18T18:47:05,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741857_1033 (size=5156) 2024-11-18T18:47:05,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741857_1033 (size=5156) 2024-11-18T18:47:05,354 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c702d3a2642d434cb3013b95b856d730 2024-11-18T18:47:05,381 DEBUG [M:0;39fff3b0f89c:34897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fee97051846f42ba9f3419ff566119a5 is 52, key is load_balancer_on/state:d/1731955531848/Put/seqid=0 2024-11-18T18:47:05,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741858_1034 (size=5056) 2024-11-18T18:47:05,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741858_1034 (size=5056) 2024-11-18T18:47:05,389 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fee97051846f42ba9f3419ff566119a5 2024-11-18T18:47:05,399 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/14ac8c81d5724a5f99465a62a5d4c6dd as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14ac8c81d5724a5f99465a62a5d4c6dd 2024-11-18T18:47:05,408 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/14ac8c81d5724a5f99465a62a5d4c6dd, entries=8, sequenceid=59, filesize=5.5 K 2024-11-18T18:47:05,409 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6464e45d75f84005a80fe58dae3a152a as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6464e45d75f84005a80fe58dae3a152a 2024-11-18T18:47:05,418 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6464e45d75f84005a80fe58dae3a152a 2024-11-18T18:47:05,418 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6464e45d75f84005a80fe58dae3a152a, entries=6, sequenceid=59, filesize=6.1 K 2024-11-18T18:47:05,420 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c702d3a2642d434cb3013b95b856d730 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c702d3a2642d434cb3013b95b856d730 2024-11-18T18:47:05,428 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c702d3a2642d434cb3013b95b856d730, entries=1, sequenceid=59, filesize=5.0 K 2024-11-18T18:47:05,430 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fee97051846f42ba9f3419ff566119a5 as hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fee97051846f42ba9f3419ff566119a5 2024-11-18T18:47:05,439 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fee97051846f42ba9f3419ff566119a5, entries=1, sequenceid=59, filesize=4.9 K 2024-11-18T18:47:05,440 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=59, compaction requested=false 2024-11-18T18:47:05,442 INFO [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:05,442 DEBUG [M:0;39fff3b0f89c:34897 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955625246Disabling compacts and flushes for region at 1731955625246Disabling writes for close at 1731955625246Obtaining lock to block concurrent updates at 1731955625246Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955625246Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731955625247 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955625248 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955625248Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955625266 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955625267 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955625282 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955625301 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955625301Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955625319 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955625340 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955625340Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955625363 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955625380 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955625380Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cb6eee6: reopening flushed file at 1731955625398 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@123e527e: reopening flushed file at 1731955625408 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d90e2c9: reopening flushed file at 1731955625418 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17abd936: reopening flushed file at 1731955625429 (+11 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=59, compaction requested=false at 1731955625440 (+11 ms)Writing region close event to WAL at 1731955625442 (+2 ms)Closed at 1731955625442 2024-11-18T18:47:05,443 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,443 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,444 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-18T18:47:05,444 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39597 is added to blk_1073741830_1006 (size=27985) 2024-11-18T18:47:05,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39253 is added to blk_1073741830_1006 (size=27985) 2024-11-18T18:47:05,448 INFO [M:0;39fff3b0f89c:34897 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T18:47:05,448 INFO [M:0;39fff3b0f89c:34897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34897 2024-11-18T18:47:05,448 INFO [M:0;39fff3b0f89c:34897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:05,449 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:47:05,557 INFO [M:0;39fff3b0f89c:34897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:05,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:05,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34897-0x101508dc2bb0000, quorum=127.0.0.1:50315, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:05,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c963ecd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:05,589 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a10aed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:05,589 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:05,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ca9bb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:05,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff5148a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:05,600 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
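The "Stopped ServerConnector" and "Stopped ... ContextHandler" entries here and below are the embedded Jetty servers behind the mini cluster's datanode and namenode web UIs being torn down; note they were bound to localhost:0, i.e. an ephemeral port. A minimal generic Jetty lifecycle sketch of that bind-to-port-0, start, stop sequence; this is plain Jetty, not Hadoop's HttpServer2 wrapper around it.

```java
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public class EphemeralJetty {
  public static void main(String[] args) throws Exception {
    Server server = new Server(0);   // port 0: let the OS pick a free port, like localhost:0 in the log
    server.start();

    int port = ((ServerConnector) server.getConnectors()[0]).getLocalPort();
    System.out.println("listening on " + port);

    server.stop();                   // counterpart of the "Stopped ServerConnector@..." entries above
    server.join();                   // wait for the server threads to exit
  }
}
```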
2024-11-18T18:47:05,600 WARN [BP-2001088950-172.17.0.2-1731955524707 heartbeating to localhost/127.0.0.1:35907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:05,600 WARN [BP-2001088950-172.17.0.2-1731955524707 heartbeating to localhost/127.0.0.1:35907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001088950-172.17.0.2-1731955524707 (Datanode Uuid 120b48cb-4ef7-4503-8f50-9711d2401e6c) service to localhost/127.0.0.1:35907 2024-11-18T18:47:05,600 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:05,602 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data3/current/BP-2001088950-172.17.0.2-1731955524707 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:05,602 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data4/current/BP-2001088950-172.17.0.2-1731955524707 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:05,602 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:05,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f93babe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:05,605 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@737d6c99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:05,605 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:05,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7893eb07{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:05,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3305dd74{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:05,607 WARN [BP-2001088950-172.17.0.2-1731955524707 heartbeating to localhost/127.0.0.1:35907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:05,607 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:05,607 WARN [BP-2001088950-172.17.0.2-1731955524707 heartbeating to localhost/127.0.0.1:35907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2001088950-172.17.0.2-1731955524707 (Datanode Uuid 59da6ac4-74e9-4811-8809-bda8259b5bc1) service to localhost/127.0.0.1:35907 2024-11-18T18:47:05,607 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:05,608 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data1/current/BP-2001088950-172.17.0.2-1731955524707 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:05,608 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/cluster_f42d2cad-321b-4358-0475-3fe1852d7fe3/data/data2/current/BP-2001088950-172.17.0.2-1731955524707 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:05,608 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:05,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:05,623 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:05,623 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:05,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:05,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:05,635 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:47:05,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:47:05,692 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35907 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/39fff3b0f89c:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@45316768 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/39fff3b0f89c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
IPC Client (874278612) connection to localhost/127.0.0.1:35907 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35907 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/39fff3b0f89c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=442 (was 338) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4659 (was 5815) 2024-11-18T18:47:05,700 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=442, ProcessCount=11, AvailableMemoryMB=4658 2024-11-18T18:47:05,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.log.dir so I do NOT create it in target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44c19187-1ce1-3abc-77fb-d6f6da4bf2b2/hadoop.tmp.dir so I do NOT create it in target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40, deleteOnExit=true 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/test.cache.data in system properties and HBase conf 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir in system properties and HBase conf 
2024-11-18T18:47:05,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:47:05,702 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:47:05,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/dfs.journalnode.edits.dir in system properties and HBase 
conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:47:05,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:47:05,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:47:05,719 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:06,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:06,075 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:06,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:06,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:06,078 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:06,084 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:06,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4340a53c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:06,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c8f0dfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:06,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b839c20{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/java.io.tmpdir/jetty-localhost-34403-hadoop-hdfs-3_4_1-tests_jar-_-any-17097863135970387734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:06,198 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d00b522{HTTP/1.1, (http/1.1)}{localhost:34403} 2024-11-18T18:47:06,198 INFO [Time-limited test {}] server.Server(415): Started @103587ms 2024-11-18T18:47:06,213 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:06,417 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:06,422 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:06,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:06,423 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:06,423 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:06,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f754f75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:06,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a779{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:06,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12742a74{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/java.io.tmpdir/jetty-localhost-40605-hadoop-hdfs-3_4_1-tests_jar-_-any-1325462820392201665/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:06,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f1e2b4c{HTTP/1.1, (http/1.1)}{localhost:40605} 2024-11-18T18:47:06,529 INFO [Time-limited test {}] server.Server(415): Started @103918ms 2024-11-18T18:47:06,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:06,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:06,591 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:06,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:06,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:06,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:06,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@592c88e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:06,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c5ed954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:06,697 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62c9cc57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/java.io.tmpdir/jetty-localhost-39243-hadoop-hdfs-3_4_1-tests_jar-_-any-16372994244571367169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:06,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3756c399{HTTP/1.1, (http/1.1)}{localhost:39243} 2024-11-18T18:47:06,698 INFO [Time-limited test {}] server.Server(415): Started @104086ms 2024-11-18T18:47:06,699 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:07,380 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data1/current/BP-1566644758-172.17.0.2-1731955625732/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:07,380 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data2/current/BP-1566644758-172.17.0.2-1731955625732/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:07,406 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6617a129f197672 with lease ID 0x2d5deb115045a55b: Processing first storage report for DS-9fb270a2-d806-4268-9293-fa1c45f98483 from datanode DatanodeRegistration(127.0.0.1:43705, datanodeUuid=0435a990-007a-4309-a0f8-06cccf410010, infoPort=43151, infoSecurePort=0, ipcPort=33055, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732) 2024-11-18T18:47:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6617a129f197672 with lease ID 0x2d5deb115045a55b: from storage DS-9fb270a2-d806-4268-9293-fa1c45f98483 node DatanodeRegistration(127.0.0.1:43705, datanodeUuid=0435a990-007a-4309-a0f8-06cccf410010, infoPort=43151, infoSecurePort=0, ipcPort=33055, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6617a129f197672 with lease ID 0x2d5deb115045a55b: Processing first storage report for DS-e66e1015-8c5a-4632-9eed-fc4208792172 from datanode DatanodeRegistration(127.0.0.1:43705, datanodeUuid=0435a990-007a-4309-a0f8-06cccf410010, infoPort=43151, infoSecurePort=0, ipcPort=33055, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732) 2024-11-18T18:47:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6617a129f197672 with lease ID 0x2d5deb115045a55b: from storage DS-e66e1015-8c5a-4632-9eed-fc4208792172 node DatanodeRegistration(127.0.0.1:43705, datanodeUuid=0435a990-007a-4309-a0f8-06cccf410010, infoPort=43151, infoSecurePort=0, ipcPort=33055, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:07,535 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data3/current/BP-1566644758-172.17.0.2-1731955625732/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:07,537 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data4/current/BP-1566644758-172.17.0.2-1731955625732/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:07,555 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:07,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24dc2dcdb1f35136 with lease ID 0x2d5deb115045a55c: Processing first storage report for DS-31f5c20f-8e3a-45fd-a5f2-95781b417bf3 from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=11466d62-24dc-406d-8acc-462e1a46d029, infoPort=35629, infoSecurePort=0, ipcPort=40029, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732) 2024-11-18T18:47:07,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24dc2dcdb1f35136 with lease ID 0x2d5deb115045a55c: from storage DS-31f5c20f-8e3a-45fd-a5f2-95781b417bf3 node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=11466d62-24dc-406d-8acc-462e1a46d029, infoPort=35629, infoSecurePort=0, ipcPort=40029, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:07,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24dc2dcdb1f35136 with lease ID 0x2d5deb115045a55c: Processing first storage report for DS-d2d28654-e426-4035-9563-86497d81aedf from datanode DatanodeRegistration(127.0.0.1:43439, datanodeUuid=11466d62-24dc-406d-8acc-462e1a46d029, infoPort=35629, infoSecurePort=0, ipcPort=40029, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732) 2024-11-18T18:47:07,559 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24dc2dcdb1f35136 with lease ID 0x2d5deb115045a55c: from storage DS-d2d28654-e426-4035-9563-86497d81aedf node DatanodeRegistration(127.0.0.1:43439, datanodeUuid=11466d62-24dc-406d-8acc-462e1a46d029, infoPort=35629, infoSecurePort=0, ipcPort=40029, storageInfo=lv=-57;cid=testClusterID;nsid=2005502108;c=1731955625732), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:07,638 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f 2024-11-18T18:47:07,641 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/zookeeper_0, clientPort=54613, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:47:07,642 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54613 2024-11-18T18:47:07,642 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,644 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:47:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:47:07,660 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf with version=8 2024-11-18T18:47:07,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:47:07,662 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:47:07,662 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:47:07,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:47:07,664 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44243 2024-11-18T18:47:07,666 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44243 connecting to ZooKeeper ensemble=127.0.0.1:54613 2024-11-18T18:47:07,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:442430x0, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:47:07,734 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44243-0x101508f48610000 connected 2024-11-18T18:47:07,799 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,802 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,806 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:07,806 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf, hbase.cluster.distributed=false 2024-11-18T18:47:07,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:47:07,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44243 2024-11-18T18:47:07,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44243 2024-11-18T18:47:07,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44243 2024-11-18T18:47:07,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44243 2024-11-18T18:47:07,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44243 2024-11-18T18:47:07,832 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:47:07,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:47:07,833 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:47:07,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40051 2024-11-18T18:47:07,835 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40051 connecting to ZooKeeper ensemble=127.0.0.1:54613 2024-11-18T18:47:07,836 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,839 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:400510x0, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:47:07,849 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:400510x0, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:07,849 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40051-0x101508f48610001 connected 2024-11-18T18:47:07,850 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:47:07,851 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:47:07,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:47:07,854 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:47:07,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40051 2024-11-18T18:47:07,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40051 2024-11-18T18:47:07,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40051 2024-11-18T18:47:07,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40051 2024-11-18T18:47:07,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40051 2024-11-18T18:47:07,895 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:44243 2024-11-18T18:47:07,895 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:07,906 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:07,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:47:07,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:07,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:07,920 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:47:07,921 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,44243,1731955627662 from backup master directory 2024-11-18T18:47:07,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:07,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:07,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:07,931 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T18:47:07,931 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:07,937 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/hbase.id] with ID: b26e3518-a515-4486-8604-e6f950273a82 2024-11-18T18:47:07,937 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/.tmp/hbase.id 2024-11-18T18:47:07,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:47:07,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:47:07,946 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/.tmp/hbase.id]:[hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/hbase.id] 2024-11-18T18:47:07,965 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:07,965 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T18:47:07,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
2024-11-18T18:47:07,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:07,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:07,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:47:07,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:47:07,985 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:47:07,986 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:47:07,987 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:07,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:47:07,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:47:07,999 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store 2024-11-18T18:47:08,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:47:08,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:47:08,009 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:08,009 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:47:08,009 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:08,009 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:08,009 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:47:08,009 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:08,009 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:47:08,010 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955628009Disabling compacts and flushes for region at 1731955628009Disabling writes for close at 1731955628009Writing region close event to WAL at 1731955628009Closed at 1731955628009 2024-11-18T18:47:08,011 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/.initializing 2024-11-18T18:47:08,012 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/WALs/39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:08,015 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C44243%2C1731955627662, suffix=, logDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/WALs/39fff3b0f89c,44243,1731955627662, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/oldWALs, maxLogs=10 2024-11-18T18:47:08,016 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C44243%2C1731955627662.1731955628016 2024-11-18T18:47:08,023 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/WALs/39fff3b0f89c,44243,1731955627662/39fff3b0f89c%2C44243%2C1731955627662.1731955628016 2024-11-18T18:47:08,027 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35629:35629),(127.0.0.1/127.0.0.1:43151:43151)] 2024-11-18T18:47:08,028 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:47:08,028 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:08,029 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,029 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:47:08,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:47:08,036 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:08,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:47:08,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:08,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:47:08,044 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,044 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:08,044 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,045 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,046 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,047 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,047 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,048 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:47:08,050 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:08,054 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:47:08,055 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846317, jitterRate=0.07614867389202118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:47:08,057 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955628029Initializing all the Stores at 1731955628030 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628030Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955628031 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955628031Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955628031Cleaning up temporary data from old regions at 1731955628047 (+16 ms)Region opened successfully at 1731955628056 (+9 ms) 2024-11-18T18:47:08,057 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:47:08,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d1448d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:47:08,064 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:47:08,064 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:47:08,064 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:47:08,064 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:47:08,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:47:08,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:47:08,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:47:08,069 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:47:08,070 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:47:08,080 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:47:08,081 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:47:08,082 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:47:08,090 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:47:08,091 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:47:08,092 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:47:08,098 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:47:08,099 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:47:08,107 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:47:08,110 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:47:08,115 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:47:08,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:08,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:08,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,124 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,44243,1731955627662, sessionid=0x101508f48610000, setting cluster-up flag (Was=false) 2024-11-18T18:47:08,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,165 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:47:08,166 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:08,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,207 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:47:08,209 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:08,210 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:47:08,213 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:08,213 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:47:08,214 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T18:47:08,214 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,44243,1731955627662 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:47:08,216 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:08,216 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:08,216 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:08,216 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:08,217 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:47:08,217 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,217 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:47:08,217 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T18:47:08,218 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955658218 2024-11-18T18:47:08,218 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:47:08,218 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:47:08,218 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:47:08,218 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:47:08,219 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:47:08,219 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:47:08,219 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,219 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:08,219 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:47:08,219 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:47:08,219 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:47:08,220 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:47:08,220 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:47:08,220 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:47:08,220 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955628220,5,FailOnTimeoutGroup] 2024-11-18T18:47:08,221 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955628220,5,FailOnTimeoutGroup] 2024-11-18T18:47:08,221 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,221 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:47:08,221 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,221 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,221 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,222 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:47:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:47:08,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:47:08,231 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:47:08,232 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf 2024-11-18T18:47:08,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:47:08,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:47:08,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:08,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:47:08,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:47:08,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:47:08,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:47:08,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:47:08,267 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:47:08,267 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:47:08,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:47:08,270 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,270 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:47:08,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740 2024-11-18T18:47:08,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740 2024-11-18T18:47:08,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:47:08,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:47:08,274 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T18:47:08,275 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:47:08,278 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:47:08,278 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(746): ClusterId : b26e3518-a515-4486-8604-e6f950273a82 2024-11-18T18:47:08,278 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:47:08,278 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754799, jitterRate=-0.04022425413131714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:47:08,279 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955628257Initializing all the Stores at 1731955628258 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628258Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628259 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955628259Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628259Cleaning up temporary data from old regions at 1731955628273 (+14 ms)Region opened successfully at 1731955628279 (+6 ms) 2024-11-18T18:47:08,280 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:47:08,280 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:47:08,280 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:47:08,280 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:47:08,280 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:47:08,281 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-18T18:47:08,281 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955628280Disabling compacts and flushes for region at 1731955628280Disabling writes for close at 1731955628280Writing region close event to WAL at 1731955628281 (+1 ms)Closed at 1731955628281 2024-11-18T18:47:08,282 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:08,283 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:47:08,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:47:08,285 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:47:08,287 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:47:08,290 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:47:08,290 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:47:08,299 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:47:08,300 DEBUG [RS:0;39fff3b0f89c:40051 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d647771, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:47:08,313 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:40051 2024-11-18T18:47:08,313 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:47:08,313 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:47:08,313 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T18:47:08,314 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,44243,1731955627662 with port=40051, startcode=1731955627831 2024-11-18T18:47:08,315 DEBUG [RS:0;39fff3b0f89c:40051 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:47:08,318 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56617, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:47:08,318 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44243 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,319 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44243 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,321 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf 2024-11-18T18:47:08,321 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41775 2024-11-18T18:47:08,321 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:47:08,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:08,333 DEBUG [RS:0;39fff3b0f89c:40051 {}] zookeeper.ZKUtil(111): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,333 WARN [RS:0;39fff3b0f89c:40051 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:47:08,333 INFO [RS:0;39fff3b0f89c:40051 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:08,333 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/WALs/39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,333 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,40051,1731955627831] 2024-11-18T18:47:08,338 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:47:08,343 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:47:08,347 INFO [RS:0;39fff3b0f89c:40051 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:47:08,347 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T18:47:08,348 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:47:08,349 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:47:08,349 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,349 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,350 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,351 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,351 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,351 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:08,351 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:08,351 DEBUG [RS:0;39fff3b0f89c:40051 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,352 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,40051,1731955627831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:47:08,371 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:47:08,371 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,40051,1731955627831-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,371 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,371 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.Replication(171): 39fff3b0f89c,40051,1731955627831 started 2024-11-18T18:47:08,389 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:08,389 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,40051,1731955627831, RpcServer on 39fff3b0f89c/172.17.0.2:40051, sessionid=0x101508f48610001 2024-11-18T18:47:08,389 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:47:08,389 DEBUG [RS:0;39fff3b0f89c:40051 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,389 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,40051,1731955627831' 2024-11-18T18:47:08,389 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:47:08,390 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:47:08,391 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:47:08,391 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:47:08,391 DEBUG [RS:0;39fff3b0f89c:40051 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,391 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,40051,1731955627831' 2024-11-18T18:47:08,391 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:47:08,391 DEBUG 
[RS:0;39fff3b0f89c:40051 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:47:08,392 DEBUG [RS:0;39fff3b0f89c:40051 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:47:08,392 INFO [RS:0;39fff3b0f89c:40051 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:47:08,392 INFO [RS:0;39fff3b0f89c:40051 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:47:08,437 WARN [39fff3b0f89c:44243 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:47:08,495 INFO [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C40051%2C1731955627831, suffix=, logDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/WALs/39fff3b0f89c,40051,1731955627831, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/oldWALs, maxLogs=32 2024-11-18T18:47:08,497 INFO [RS:0;39fff3b0f89c:40051 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C40051%2C1731955627831.1731955628497 2024-11-18T18:47:08,506 INFO [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/WALs/39fff3b0f89c,40051,1731955627831/39fff3b0f89c%2C40051%2C1731955627831.1731955628497 2024-11-18T18:47:08,507 DEBUG [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43151:43151),(127.0.0.1/127.0.0.1:35629:35629)] 2024-11-18T18:47:08,688 DEBUG [39fff3b0f89c:44243 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:47:08,689 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,692 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,40051,1731955627831, state=OPENING 2024-11-18T18:47:08,739 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:47:08,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:08,749 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:47:08,749 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:08,749 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:08,749 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,40051,1731955627831}] 2024-11-18T18:47:08,904 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:47:08,908 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38843, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:47:08,914 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:47:08,914 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:08,918 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C40051%2C1731955627831.meta, suffix=.meta, logDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/WALs/39fff3b0f89c,40051,1731955627831, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/oldWALs, maxLogs=32 2024-11-18T18:47:08,921 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C40051%2C1731955627831.meta.1731955628920.meta 2024-11-18T18:47:08,929 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/WALs/39fff3b0f89c,40051,1731955627831/39fff3b0f89c%2C40051%2C1731955627831.meta.1731955628920.meta 2024-11-18T18:47:08,930 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35629:35629),(127.0.0.1/127.0.0.1:43151:43151)] 2024-11-18T18:47:08,931 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:47:08,932 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:47:08,932 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:47:08,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:47:08,937 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:47:08,937 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:47:08,939 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:47:08,939 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:47:08,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:47:08,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:08,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:47:08,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:47:08,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:08,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T18:47:08,945 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:47:08,947 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740 2024-11-18T18:47:08,949 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740 2024-11-18T18:47:08,950 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:47:08,950 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:47:08,951 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:47:08,953 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:47:08,954 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777723, jitterRate=-0.011075228452682495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:47:08,955 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:47:08,956 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955628933Writing region info on filesystem at 1731955628933Initializing all the Stores at 1731955628934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628934Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628935 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955628935Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955628935Cleaning up temporary data from old regions at 1731955628950 (+15 ms)Running coprocessor post-open hooks at 1731955628955 (+5 ms)Region opened successfully at 1731955628955 2024-11-18T18:47:08,957 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955628904 2024-11-18T18:47:08,960 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:47:08,960 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:47:08,961 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:08,963 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,40051,1731955627831, state=OPEN 2024-11-18T18:47:09,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:47:09,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:47:09,001 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:09,001 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:09,001 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:09,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:47:09,007 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,40051,1731955627831 in 252 msec 2024-11-18T18:47:09,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:47:09,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 724 msec 2024-11-18T18:47:09,013 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:09,013 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:47:09,015 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:47:09,015 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,40051,1731955627831, seqNum=-1] 2024-11-18T18:47:09,016 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:47:09,018 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52829, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:47:09,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 813 msec 2024-11-18T18:47:09,028 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955629028, completionTime=-1 2024-11-18T18:47:09,028 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:47:09,028 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955689031 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955749031 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:09,031 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:09,032 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:09,032 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:44243, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:47:09,032 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:09,032 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:09,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T18:47:09,034 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.107sec 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:47:09,038 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:47:09,041 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:47:09,041 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:47:09,042 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,44243,1731955627662-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
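[The chores listed above (ClusterStatusChore, BalancerChore, RegionNormalizerChore, HbckChore, FlushedSequenceIdFlusher, ...) are periodic tasks the master re-runs on fixed intervals via its ChoreService. Purely as an illustration of that fixed-period pattern — this is not HBase's ChoreService API; the task body and 60s period are arbitrary — a plain java.util.concurrent sketch:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    // Comparable to "Chore ... period=60000, unit=MILLISECONDS is enabled" in the log:
    // the task is re-run every 60s until the pool is shut down.
    pool.scheduleAtFixedRate(
        () -> System.out.println("balancer-like chore tick"),
        0, 60_000, TimeUnit.MILLISECONDS);
  }
}
]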
2024-11-18T18:47:09,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f08fec3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:09,079 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,44243,-1 for getting cluster id 2024-11-18T18:47:09,079 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:47:09,081 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b26e3518-a515-4486-8604-e6f950273a82' 2024-11-18T18:47:09,082 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:47:09,082 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b26e3518-a515-4486-8604-e6f950273a82" 2024-11-18T18:47:09,082 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ba963c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:09,082 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,44243,-1] 2024-11-18T18:47:09,083 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:47:09,083 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,085 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49308, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:47:09,086 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb2e54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:09,087 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:47:09,088 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,40051,1731955627831, seqNum=-1] 2024-11-18T18:47:09,089 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:47:09,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41802, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:47:09,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:09,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:09,098 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:47:09,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:47:09,098 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:47:09,098 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:09,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,099 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:47:09,099 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:47:09,099 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=423581442, stopped=false 2024-11-18T18:47:09,099 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,44243,1731955627662 2024-11-18T18:47:09,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:09,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:09,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:09,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:09,115 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:09,116 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
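[The call stacks above bottom out in AbstractTestLogRolling.tearDown, which shuts the minicluster down through HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of such a JUnit 4 tearDown, assuming a shared HBaseTestingUtil field named TEST_UTIL (the field name is an assumption; the real test class manages its own instance and starts the cluster elsewhere):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class LogRollingTearDownSketch {
  // Assumed shared test utility; started elsewhere with startMiniCluster().
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the async connection, stops the region server and master,
    // then the DataNodes, NameNode and MiniZK cluster, as logged above.
    TEST_UTIL.shutdownMiniCluster();
  }
}
]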
2024-11-18T18:47:09,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:09,116 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:09,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:09,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,117 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,40051,1731955627831' ***** 2024-11-18T18:47:09,117 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:47:09,117 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:47:09,117 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:47:09,117 INFO [RS:0;39fff3b0f89c:40051 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:47:09,117 INFO [RS:0;39fff3b0f89c:40051 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:47:09,117 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:09,117 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:40051. 2024-11-18T18:47:09,118 DEBUG [RS:0;39fff3b0f89c:40051 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:09,118 DEBUG [RS:0;39fff3b0f89c:40051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:47:09,118 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T18:47:09,118 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T18:47:09,119 DEBUG [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T18:47:09,119 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:47:09,119 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:47:09,119 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:47:09,119 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:47:09,119 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:47:09,119 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T18:47:09,140 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/.tmp/ns/1d8e4c498463458bb51ae5328343622b is 43, key is default/ns:d/1731955629019/Put/seqid=0 2024-11-18T18:47:09,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741835_1011 (size=5153) 2024-11-18T18:47:09,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741835_1011 (size=5153) 2024-11-18T18:47:09,148 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/.tmp/ns/1d8e4c498463458bb51ae5328343622b 2024-11-18T18:47:09,158 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/.tmp/ns/1d8e4c498463458bb51ae5328343622b as hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/ns/1d8e4c498463458bb51ae5328343622b 2024-11-18T18:47:09,167 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/ns/1d8e4c498463458bb51ae5328343622b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T18:47:09,169 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false 2024-11-18T18:47:09,169 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T18:47:09,175 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T18:47:09,176 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:47:09,176 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:09,176 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955629119Running coprocessor pre-close hooks at 1731955629119Disabling compacts and flushes for region at 1731955629119Disabling writes for close at 1731955629119Obtaining lock to block concurrent updates at 1731955629119Preparing flush snapshotting stores in 1588230740 at 1731955629119Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731955629120 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731955629121 (+1 ms)Flushing 1588230740/ns: creating writer at 1731955629121Flushing 1588230740/ns: appending metadata at 1731955629139 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731955629140 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e002fbd: reopening flushed file at 1731955629157 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 50ms, sequenceid=6, compaction requested=false at 1731955629169 (+12 ms)Writing region close event to WAL at 1731955629170 (+1 ms)Running coprocessor post-close hooks at 1731955629175 (+5 ms)Closed at 1731955629176 (+1 ms) 2024-11-18T18:47:09,176 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:09,319 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,40051,1731955627831; all regions closed. 
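[The close above forces a memstore flush of hbase:meta (74 B of the ns family) to an HFile before the region is marked closed. Outside of shutdown, the same kind of flush can be requested explicitly through the client Admin API; a short sketch, assuming a running cluster reachable via the default configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMetaSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flushes all memstores of hbase:meta to HFiles, comparable to the
      // flush-on-close recorded in the region close journal above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
]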
2024-11-18T18:47:09,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,320 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,320 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,320 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741834_1010 (size=1152) 2024-11-18T18:47:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741834_1010 (size=1152) 2024-11-18T18:47:09,439 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:47:09,439 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:47:09,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:09,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:09,727 DEBUG [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/oldWALs 2024-11-18T18:47:09,727 INFO [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C40051%2C1731955627831.meta:.meta(num 1731955628920) 2024-11-18T18:47:09,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,728 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741833_1009 (size=93) 2024-11-18T18:47:09,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741833_1009 (size=93) 2024-11-18T18:47:09,735 DEBUG [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/oldWALs 2024-11-18T18:47:09,735 INFO [RS:0;39fff3b0f89c:40051 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C40051%2C1731955627831:(num 1731955628497) 2024-11-18T18:47:09,735 DEBUG [RS:0;39fff3b0f89c:40051 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:09,735 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:09,736 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:09,736 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, 
period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:09,736 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:09,736 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:47:09,736 INFO [RS:0;39fff3b0f89c:40051 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40051 2024-11-18T18:47:09,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:09,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,40051,1731955627831 2024-11-18T18:47:09,765 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:09,773 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,40051,1731955627831] 2024-11-18T18:47:09,782 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,40051,1731955627831 already deleted, retry=false 2024-11-18T18:47:09,782 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,40051,1731955627831 expired; onlineServers=0 2024-11-18T18:47:09,782 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,44243,1731955627662' ***** 2024-11-18T18:47:09,782 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:47:09,782 INFO [M:0;39fff3b0f89c:44243 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:09,782 INFO [M:0;39fff3b0f89c:44243 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:09,782 DEBUG [M:0;39fff3b0f89c:44243 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:47:09,783 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
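[The master learns the region server is gone because /hbase/rs/39fff3b0f89c,40051,1731955627831 is an ephemeral znode: it is deleted automatically when the region server closes its ZooKeeper session, and RegionServerTracker reacts to the resulting NodeDeleted/NodeChildrenChanged events. A generic ZooKeeper sketch of that mechanism (not HBase code; the quorum address and path are placeholders):

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralMembershipSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum; HBase uses the value of hbase.zookeeper.quorum.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {});
    // Ephemeral znodes are bound to this session, like /hbase/rs/<server,port,startcode>.
    zk.create("/rs-member-demo", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Closing the session deletes the node; watchers on the parent see
    // NodeChildrenChanged and watchers on the node itself see NodeDeleted.
    zk.close();
  }
}
]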
2024-11-18T18:47:09,783 DEBUG [M:0;39fff3b0f89c:44243 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:47:09,783 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955628220 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955628220,5,FailOnTimeoutGroup] 2024-11-18T18:47:09,783 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955628220 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955628220,5,FailOnTimeoutGroup] 2024-11-18T18:47:09,783 INFO [M:0;39fff3b0f89c:44243 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:09,783 INFO [M:0;39fff3b0f89c:44243 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:09,783 DEBUG [M:0;39fff3b0f89c:44243 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:47:09,784 INFO [M:0;39fff3b0f89c:44243 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:47:09,784 INFO [M:0;39fff3b0f89c:44243 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:09,784 INFO [M:0;39fff3b0f89c:44243 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:47:09,784 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:47:09,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:47:09,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:09,790 DEBUG [M:0;39fff3b0f89c:44243 {}] zookeeper.ZKUtil(347): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:47:09,791 WARN [M:0;39fff3b0f89c:44243 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:47:09,791 INFO [M:0;39fff3b0f89c:44243 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/.lastflushedseqids 2024-11-18T18:47:09,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741836_1012 (size=108) 2024-11-18T18:47:09,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741836_1012 (size=108) 2024-11-18T18:47:09,802 INFO [M:0;39fff3b0f89c:44243 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:47:09,802 INFO [M:0;39fff3b0f89c:44243 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:47:09,802 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:47:09,802 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:09,802 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:09,803 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:47:09,803 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:09,803 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T18:47:09,825 DEBUG [M:0;39fff3b0f89c:44243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1e90396ff948bd9771c0fbe51a0f9a is 82, key is hbase:meta,,1/info:regioninfo/1731955628961/Put/seqid=0 2024-11-18T18:47:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741837_1013 (size=5672) 2024-11-18T18:47:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741837_1013 (size=5672) 2024-11-18T18:47:09,831 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1e90396ff948bd9771c0fbe51a0f9a 2024-11-18T18:47:09,855 DEBUG [M:0;39fff3b0f89c:44243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0e3a7a05342400aa14cf0d47de618bf is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731955629026/Put/seqid=0 2024-11-18T18:47:09,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741838_1014 (size=5275) 2024-11-18T18:47:09,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741838_1014 (size=5275) 2024-11-18T18:47:09,861 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0e3a7a05342400aa14cf0d47de618bf 2024-11-18T18:47:09,873 INFO [RS:0;39fff3b0f89c:40051 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:09,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:09,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40051-0x101508f48610001, 
quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:09,874 INFO [RS:0;39fff3b0f89c:40051 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,40051,1731955627831; zookeeper connection closed. 2024-11-18T18:47:09,874 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@48e74791 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@48e74791 2024-11-18T18:47:09,874 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:47:09,886 DEBUG [M:0;39fff3b0f89c:44243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9eabca430194eaa8b8f033b209623b0 is 69, key is 39fff3b0f89c,40051,1731955627831/rs:state/1731955628319/Put/seqid=0 2024-11-18T18:47:09,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741839_1015 (size=5156) 2024-11-18T18:47:09,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741839_1015 (size=5156) 2024-11-18T18:47:09,892 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9eabca430194eaa8b8f033b209623b0 2024-11-18T18:47:09,917 DEBUG [M:0;39fff3b0f89c:44243 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc57645e84a34b86895f5fc19a903838 is 52, key is load_balancer_on/state:d/1731955629096/Put/seqid=0 2024-11-18T18:47:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741840_1016 (size=5056) 2024-11-18T18:47:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741840_1016 (size=5056) 2024-11-18T18:47:09,924 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc57645e84a34b86895f5fc19a903838 2024-11-18T18:47:09,932 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b1e90396ff948bd9771c0fbe51a0f9a as hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b1e90396ff948bd9771c0fbe51a0f9a 2024-11-18T18:47:09,939 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b1e90396ff948bd9771c0fbe51a0f9a, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T18:47:09,941 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0e3a7a05342400aa14cf0d47de618bf as hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0e3a7a05342400aa14cf0d47de618bf 2024-11-18T18:47:09,948 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0e3a7a05342400aa14cf0d47de618bf, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T18:47:09,950 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9eabca430194eaa8b8f033b209623b0 as hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9eabca430194eaa8b8f033b209623b0 2024-11-18T18:47:09,956 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9eabca430194eaa8b8f033b209623b0, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T18:47:09,958 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/cc57645e84a34b86895f5fc19a903838 as hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc57645e84a34b86895f5fc19a903838 2024-11-18T18:47:09,965 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/a19ffb64-d236-d2c0-ab1a-3e4d937a1daf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/cc57645e84a34b86895f5fc19a903838, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T18:47:09,966 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=29, compaction requested=false 2024-11-18T18:47:09,968 INFO [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:47:09,969 DEBUG [M:0;39fff3b0f89c:44243 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955629802Disabling compacts and flushes for region at 1731955629802Disabling writes for close at 1731955629803 (+1 ms)Obtaining lock to block concurrent updates at 1731955629803Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955629803Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731955629803Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955629804 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955629804Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955629824 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955629824Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955629838 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955629855 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955629855Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955629868 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955629885 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955629885Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955629901 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955629917 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955629917Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@659f7173: reopening flushed file at 1731955629930 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@137a48db: reopening flushed file at 1731955629939 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5517864f: reopening flushed file at 1731955629949 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48e00e8f: reopening flushed file at 1731955629957 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=29, compaction requested=false at 1731955629966 (+9 ms)Writing region close event to WAL at 1731955629968 (+2 ms)Closed at 1731955629968 2024-11-18T18:47:09,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,970 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:09,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43705 is added to blk_1073741830_1006 (size=10311) 2024-11-18T18:47:09,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43439 is added to blk_1073741830_1006 (size=10311) 2024-11-18T18:47:09,973 INFO [M:0;39fff3b0f89c:44243 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T18:47:09,973 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:47:09,973 INFO [M:0;39fff3b0f89c:44243 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44243 2024-11-18T18:47:09,973 INFO [M:0;39fff3b0f89c:44243 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:10,082 INFO [M:0;39fff3b0f89c:44243 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:10,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:10,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44243-0x101508f48610000, quorum=127.0.0.1:54613, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:10,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62c9cc57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:10,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3756c399{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:10,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:10,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c5ed954{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:10,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@592c88e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:10,087 WARN [BP-1566644758-172.17.0.2-1731955625732 heartbeating to localhost/127.0.0.1:41775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:10,087 WARN [BP-1566644758-172.17.0.2-1731955625732 heartbeating to localhost/127.0.0.1:41775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1566644758-172.17.0.2-1731955625732 (Datanode Uuid 11466d62-24dc-406d-8acc-462e1a46d029) service to localhost/127.0.0.1:41775 2024-11-18T18:47:10,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data3/current/BP-1566644758-172.17.0.2-1731955625732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:10,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data4/current/BP-1566644758-172.17.0.2-1731955625732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:10,089 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:10,089 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:10,089 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:10,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12742a74{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:10,091 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f1e2b4c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:10,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:10,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a779{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:10,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f754f75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:10,094 WARN [BP-1566644758-172.17.0.2-1731955625732 heartbeating to localhost/127.0.0.1:41775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:10,094 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:10,094 WARN [BP-1566644758-172.17.0.2-1731955625732 heartbeating to localhost/127.0.0.1:41775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1566644758-172.17.0.2-1731955625732 (Datanode Uuid 0435a990-007a-4309-a0f8-06cccf410010) service to localhost/127.0.0.1:41775 2024-11-18T18:47:10,094 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:10,094 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data1/current/BP-1566644758-172.17.0.2-1731955625732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:10,095 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/cluster_1dd426a9-f7fc-be54-1c30-093f69375e40/data/data2/current/BP-1566644758-172.17.0.2-1731955625732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:10,095 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:10,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b839c20{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:10,102 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d00b522{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:10,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:10,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c8f0dfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:10,103 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4340a53c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:10,109 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:47:10,123 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:47:10,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.log.dir so I do NOT create it in target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1f49c301-3257-dd8c-acf2-b3571a3bc15f/hadoop.tmp.dir so I do NOT create it in target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c, deleteOnExit=true 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:47:10,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/test.cache.data in system properties and HBase conf 2024-11-18T18:47:10,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:47:10,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:47:10,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:47:10,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:47:10,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:47:10,134 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:47:10,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:47:10,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:47:10,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:10,144 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:10,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:10,156 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:10,353 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:10,430 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:10,437 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:10,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:10,442 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:10,442 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:10,443 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:10,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@281d64b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:10,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37564f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:10,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58a4fc41{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-34853-hadoop-hdfs-3_4_1-tests_jar-_-any-7484180949012386962/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:10,556 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e92d0c5{HTTP/1.1, (http/1.1)}{localhost:34853} 2024-11-18T18:47:10,556 INFO [Time-limited test {}] server.Server(415): Started @107945ms 2024-11-18T18:47:10,576 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:10,790 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:10,795 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:10,799 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:10,800 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:10,800 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:10,800 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198a2712{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:10,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@172c9107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:10,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32694da{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-44085-hadoop-hdfs-3_4_1-tests_jar-_-any-15550345055567873833/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:10,907 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@150beccb{HTTP/1.1, (http/1.1)}{localhost:44085} 2024-11-18T18:47:10,907 INFO [Time-limited test {}] server.Server(415): Started @108296ms 2024-11-18T18:47:10,908 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:10,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:10,946 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:10,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:10,947 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:10,947 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:10,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7acad2a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:10,948 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74ca9210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:11,050 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e7c1fd9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-39959-hadoop-hdfs-3_4_1-tests_jar-_-any-17611055132854682630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:11,051 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2070bd8d{HTTP/1.1, (http/1.1)}{localhost:39959} 2024-11-18T18:47:11,051 INFO [Time-limited test {}] server.Server(415): Started @108440ms 2024-11-18T18:47:11,052 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:11,674 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data1/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:11,674 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data2/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:11,703 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x238bf12385b1ae47 with lease ID 0x7a03a54b6d62c314: Processing first storage report for DS-be88c8a9-50e9-470c-b3d3-3523e44434ed from datanode DatanodeRegistration(127.0.0.1:41919, datanodeUuid=fd6f6d6d-f2c4-49d8-ba95-650f6c3f539d, infoPort=40081, infoSecurePort=0, ipcPort=46443, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x238bf12385b1ae47 with lease ID 0x7a03a54b6d62c314: from storage DS-be88c8a9-50e9-470c-b3d3-3523e44434ed node DatanodeRegistration(127.0.0.1:41919, datanodeUuid=fd6f6d6d-f2c4-49d8-ba95-650f6c3f539d, infoPort=40081, infoSecurePort=0, ipcPort=46443, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x238bf12385b1ae47 with lease ID 0x7a03a54b6d62c314: Processing first storage report for DS-2ef8f84f-69cc-45f2-8a4e-84b3947384bf from datanode DatanodeRegistration(127.0.0.1:41919, datanodeUuid=fd6f6d6d-f2c4-49d8-ba95-650f6c3f539d, infoPort=40081, infoSecurePort=0, ipcPort=46443, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x238bf12385b1ae47 with lease ID 0x7a03a54b6d62c314: from storage DS-2ef8f84f-69cc-45f2-8a4e-84b3947384bf node DatanodeRegistration(127.0.0.1:41919, datanodeUuid=fd6f6d6d-f2c4-49d8-ba95-650f6c3f539d, infoPort=40081, infoSecurePort=0, ipcPort=46443, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:11,902 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:11,904 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:11,929 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:11,932 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce90379af2becbb with lease ID 0x7a03a54b6d62c315: Processing first storage report for DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5 from datanode DatanodeRegistration(127.0.0.1:38853, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=44119, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:11,932 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce90379af2becbb with lease ID 0x7a03a54b6d62c315: from storage DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5 node DatanodeRegistration(127.0.0.1:38853, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=44119, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T18:47:11,932 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ce90379af2becbb with lease ID 0x7a03a54b6d62c315: Processing first storage report for DS-c879dd95-44e2-47ea-a3ee-fc8171daefce from datanode DatanodeRegistration(127.0.0.1:38853, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=44119, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:11,932 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ce90379af2becbb with lease ID 0x7a03a54b6d62c315: from storage DS-c879dd95-44e2-47ea-a3ee-fc8171daefce node DatanodeRegistration(127.0.0.1:38853, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=44119, infoSecurePort=0, ipcPort=41241, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:11,997 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb 2024-11-18T18:47:12,001 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/zookeeper_0, clientPort=55514, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:47:12,002 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55514 2024-11-18T18:47:12,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,004 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:47:12,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:47:12,015 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5 with version=8 2024-11-18T18:47:12,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:47:12,018 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:47:12,018 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,018 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,018 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:47:12,018 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,018 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:47:12,019 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:47:12,019 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:47:12,019 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38533 2024-11-18T18:47:12,022 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38533 connecting to ZooKeeper ensemble=127.0.0.1:55514 2024-11-18T18:47:12,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385330x0, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:47:12,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38533-0x101508f59690000 connected 2024-11-18T18:47:12,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,159 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,162 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:12,162 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5, hbase.cluster.distributed=false 2024-11-18T18:47:12,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:47:12,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38533 2024-11-18T18:47:12,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38533 2024-11-18T18:47:12,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38533 2024-11-18T18:47:12,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38533 2024-11-18T18:47:12,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38533 2024-11-18T18:47:12,183 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:47:12,183 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:47:12,184 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:47:12,184 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35551 2024-11-18T18:47:12,186 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35551 connecting to ZooKeeper ensemble=127.0.0.1:55514 2024-11-18T18:47:12,187 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:12,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:355510x0, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:47:12,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:12,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35551-0x101508f59690001 connected 2024-11-18T18:47:12,208 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:47:12,208 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:47:12,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:47:12,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:47:12,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35551 2024-11-18T18:47:12,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35551 2024-11-18T18:47:12,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35551 2024-11-18T18:47:12,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35551 2024-11-18T18:47:12,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35551 2024-11-18T18:47:12,225 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:38533 2024-11-18T18:47:12,225 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:12,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:12,232 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:47:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,240 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:47:12,241 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,38533,1731955632018 from backup master directory 2024-11-18T18:47:12,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:12,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:47:12,248 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
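The ZKWatcher events in this stretch are plain ZooKeeper watches under the /hbase base znode: the master registers /hbase/backup-masters/39fff3b0f89c,38533,1731955632018, creates /hbase/master once it wins the active-master race, and then removes its backup-masters entry, and every connected watcher sees the matching NodeCreated, NodeDeleted and NodeChildrenChanged events. A bare-bones sketch with the stock ZooKeeper client is below; the connect string 127.0.0.1:55514 and the znode paths come from the log, while the session timeout, the demo znode name and the assumption that /hbase and /hbase/backup-masters already exist are illustrative.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        // counterpart of the ZKWatcher(609) lines: type=NodeCreated/NodeDeleted/..., path=/hbase/...
        System.out.println("event " + event.getType() + " on " + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:55514", 30000, watcher);

    // Watch the active-master znode before it exists ("Set watcher on znode that does not yet exist").
    zk.exists("/hbase/master", true);

    // Register as a backup master, then "promote" by creating /hbase/master and deleting the backup entry.
    // Assumes /hbase and /hbase/backup-masters were created beforehand.
    String backup = zk.create("/hbase/backup-masters/demo-master", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.create("/hbase/master", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.delete(backup, -1);  // watchers on /hbase/backup-masters see NodeChildrenChanged / NodeDeleted

    zk.close();
  }
}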
2024-11-18T18:47:12,248 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,38533,1731955632018
2024-11-18T18:47:12,254 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/hbase.id] with ID: 38871ccc-f2e4-46e6-8439-b3d24921d879
2024-11-18T18:47:12,254 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/.tmp/hbase.id
2024-11-18T18:47:12,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741826_1002 (size=42)
2024-11-18T18:47:12,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741826_1002 (size=42)
2024-11-18T18:47:12,266 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/.tmp/hbase.id]:[hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/hbase.id]
2024-11-18T18:47:12,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-18T18:47:12,280 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-18T18:47:12,281 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
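FSUtils handles the cluster ID the way the two lines above describe: write the content to a file under .tmp, then rename it onto hbase.id so no reader ever sees a partially written file. A sketch of that write-then-rename pattern with the Hadoop FileSystem API is below; the root URI, the .tmp/hbase.id and hbase.id names and the ID value are copied from the log, while the plain UTF-8 payload and the missing error handling are simplifications rather than the exact format FSUtils writes.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5");
    FileSystem fs = root.getFileSystem(conf);

    Path tmp = new Path(root, ".tmp/hbase.id");
    Path target = new Path(root, "hbase.id");

    // 1. Write the ID to a temporary location ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("38871ccc-f2e4-46e6-8439-b3d24921d879".getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then rename it onto its final name, as in the FSUtils(634) line.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}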
2024-11-18T18:47:12,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:47:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:47:12,302 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:47:12,303 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:47:12,303 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:12,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:47:12,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:47:12,318 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store 2024-11-18T18:47:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:47:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:47:12,328 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:12,328 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
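The descriptor dump above is easier to read as an ordinary table descriptor with four column families. The sketch below shows roughly how the logged attributes of the info family (VERSIONS 3, ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) map onto the public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder API; the master:store region itself is created internally by MasterRegion rather than through this client path, and the proc, rs and state families are left at builder defaults here rather than spelled out.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info' family, matching the attributes logged for master:store
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        // remaining families shown with defaults only, for brevity
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(desc);
  }
}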
2024-11-18T18:47:12,328 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955632328Disabling compacts and flushes for region at 1731955632328Disabling writes for close at 1731955632328Writing region close event to WAL at 1731955632328Closed at 1731955632328 2024-11-18T18:47:12,329 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/.initializing 2024-11-18T18:47:12,329 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,334 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C38533%2C1731955632018, suffix=, logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018, archiveDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/oldWALs, maxLogs=10 2024-11-18T18:47:12,335 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38533%2C1731955632018.1731955632334 2024-11-18T18:47:12,346 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 2024-11-18T18:47:12,347 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40081:40081),(127.0.0.1/127.0.0.1:44119:44119)] 2024-11-18T18:47:12,350 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:47:12,350 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:12,351 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,351 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:47:12,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:12,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:47:12,357 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:12,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:47:12,360 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:12,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:47:12,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:12,363 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,364 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,364 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,366 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,366 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,367 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:47:12,369 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:47:12,371 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:47:12,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747109, jitterRate=-0.0500028133392334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:47:12,373 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955632351Initializing all the Stores at 1731955632352 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955632352Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955632352Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955632352Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955632352Cleaning up temporary data from old regions at 1731955632366 (+14 ms)Region opened successfully at 1731955632373 (+7 ms) 2024-11-18T18:47:12,374 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:47:12,378 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3f931d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:47:12,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:47:12,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:47:12,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:47:12,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:47:12,380 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:47:12,380 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:47:12,380 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:47:12,383 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:47:12,384 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:47:12,389 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:47:12,390 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:47:12,391 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:47:12,398 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:47:12,398 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:47:12,400 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:47:12,406 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:47:12,408 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:47:12,415 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:47:12,417 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:47:12,423 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:47:12,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:12,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:12,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,432 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,38533,1731955632018, sessionid=0x101508f59690000, setting cluster-up flag (Was=false) 2024-11-18T18:47:12,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,473 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:47:12,475 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:12,515 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:47:12,517 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:12,519 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:47:12,522 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:12,522 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:47:12,523 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T18:47:12,523 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,38533,1731955632018 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:47:12,525 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:12,525 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:47:12,526 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T18:47:12,527 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955662527 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:47:12,528 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,529 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:47:12,529 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:12,529 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:47:12,529 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:47:12,529 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:47:12,530 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955632530,5,FailOnTimeoutGroup] 2024-11-18T18:47:12,530 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955632530,5,FailOnTimeoutGroup] 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,530 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,530 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,556 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:47:12,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:47:12,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:47:12,573 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:47:12,573 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5 2024-11-18T18:47:12,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:47:12,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:47:12,586 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:12,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:47:12,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:47:12,590 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:12,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:47:12,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:47:12,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:12,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:47:12,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:47:12,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:12,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:47:12,603 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:47:12,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:12,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:12,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:47:12,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740 2024-11-18T18:47:12,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740 2024-11-18T18:47:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:47:12,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:47:12,608 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T18:47:12,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:47:12,612 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:47:12,613 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776627, jitterRate=-0.01246894896030426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:47:12,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955632586Initializing all the Stores at 1731955632587 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955632588 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955632588Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955632588Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955632588Cleaning up temporary data from old regions at 1731955632607 (+19 ms)Region opened successfully at 1731955632613 (+6 ms) 2024-11-18T18:47:12,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:47:12,613 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:47:12,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:47:12,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:47:12,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:47:12,614 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:12,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955632613Disabling compacts and flushes for region at 1731955632613Disabling writes for close at 1731955632614 (+1 
ms)Writing region close event to WAL at 1731955632614Closed at 1731955632614 2024-11-18T18:47:12,615 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(746): ClusterId : 38871ccc-f2e4-46e6-8439-b3d24921d879 2024-11-18T18:47:12,615 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:47:12,616 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:12,616 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:47:12,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:47:12,619 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:47:12,620 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:47:12,624 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:47:12,624 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:47:12,632 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:47:12,632 DEBUG [RS:0;39fff3b0f89c:35551 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@604ab482, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:47:12,647 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:35551 2024-11-18T18:47:12,647 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:47:12,647 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:47:12,647 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T18:47:12,648 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,38533,1731955632018 with port=35551, startcode=1731955632182 2024-11-18T18:47:12,648 DEBUG [RS:0;39fff3b0f89c:35551 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:47:12,650 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45653, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:47:12,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38533 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38533 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,653 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5 2024-11-18T18:47:12,653 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44763 2024-11-18T18:47:12,653 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:47:12,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:12,664 DEBUG [RS:0;39fff3b0f89c:35551 {}] zookeeper.ZKUtil(111): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,664 WARN [RS:0;39fff3b0f89c:35551 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:47:12,664 INFO [RS:0;39fff3b0f89c:35551 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:12,665 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,665 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,35551,1731955632182] 2024-11-18T18:47:12,671 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:47:12,673 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:47:12,673 INFO [RS:0;39fff3b0f89c:35551 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:47:12,673 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T18:47:12,674 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:47:12,675 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:47:12,675 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,675 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:12,676 DEBUG [RS:0;39fff3b0f89c:35551 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,679 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,35551,1731955632182-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:47:12,697 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:47:12,697 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,35551,1731955632182-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,697 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,697 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.Replication(171): 39fff3b0f89c,35551,1731955632182 started 2024-11-18T18:47:12,714 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:12,714 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,35551,1731955632182, RpcServer on 39fff3b0f89c/172.17.0.2:35551, sessionid=0x101508f59690001 2024-11-18T18:47:12,714 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:47:12,714 DEBUG [RS:0;39fff3b0f89c:35551 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,714 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,35551,1731955632182' 2024-11-18T18:47:12,714 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:47:12,715 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:47:12,715 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:47:12,716 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:47:12,716 DEBUG [RS:0;39fff3b0f89c:35551 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:12,716 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,35551,1731955632182' 2024-11-18T18:47:12,716 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:47:12,716 DEBUG 
[RS:0;39fff3b0f89c:35551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:47:12,716 DEBUG [RS:0;39fff3b0f89c:35551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:47:12,717 INFO [RS:0;39fff3b0f89c:35551 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:47:12,717 INFO [RS:0;39fff3b0f89c:35551 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:47:12,771 WARN [39fff3b0f89c:38533 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:47:12,820 INFO [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C35551%2C1731955632182, suffix=, logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182, archiveDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs, maxLogs=32 2024-11-18T18:47:12,822 INFO [RS:0;39fff3b0f89c:35551 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955632822 2024-11-18T18:47:12,829 INFO [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 2024-11-18T18:47:12,830 DEBUG [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40081:40081),(127.0.0.1/127.0.0.1:44119:44119)] 2024-11-18T18:47:13,021 DEBUG [39fff3b0f89c:38533 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:47:13,022 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:13,023 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,35551,1731955632182, state=OPENING 2024-11-18T18:47:13,031 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:47:13,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:13,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:13,040 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:13,041 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:13,041 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:47:13,042 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,35551,1731955632182}] 2024-11-18T18:47:13,195 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:47:13,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48983, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:47:13,202 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:47:13,202 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:13,204 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C35551%2C1731955632182.meta, suffix=.meta, logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182, archiveDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs, maxLogs=32 2024-11-18T18:47:13,205 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta 2024-11-18T18:47:13,216 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta 2024-11-18T18:47:13,217 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44119:44119),(127.0.0.1/127.0.0.1:40081:40081)] 2024-11-18T18:47:13,232 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:47:13,232 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:47:13,232 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:47:13,232 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:47:13,233 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:47:13,233 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:13,233 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:47:13,233 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:47:13,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:47:13,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:47:13,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:13,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:47:13,238 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:47:13,238 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:13,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:47:13,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:47:13,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:47:13,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:47:13,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:47:13,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T18:47:13,242 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:47:13,243 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740 2024-11-18T18:47:13,244 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740 2024-11-18T18:47:13,246 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:47:13,246 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:47:13,247 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:47:13,248 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:47:13,249 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828901, jitterRate=0.054002270102500916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:47:13,250 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:47:13,250 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955633233Writing region info on filesystem at 1731955633233Initializing all the Stores at 1731955633234 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955633234Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955633234Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955633234Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955633234Cleaning up temporary data from old regions at 1731955633246 (+12 ms)Running coprocessor post-open hooks at 1731955633250 (+4 ms)Region opened successfully at 1731955633250 2024-11-18T18:47:13,251 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955633195 2024-11-18T18:47:13,254 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:47:13,254 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:47:13,255 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:13,257 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,35551,1731955632182, state=OPEN 2024-11-18T18:47:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:47:13,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:47:13,291 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:13,291 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:13,291 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:47:13,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:47:13,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,35551,1731955632182 in 249 msec 2024-11-18T18:47:13,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:47:13,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 680 msec 2024-11-18T18:47:13,301 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:47:13,301 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:47:13,303 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:47:13,303 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,35551,1731955632182, seqNum=-1] 2024-11-18T18:47:13,303 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:47:13,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45063, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:47:13,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 790 msec 2024-11-18T18:47:13,312 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955633312, completionTime=-1 2024-11-18T18:47:13,312 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:47:13,312 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:47:13,314 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:47:13,314 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955693314 2024-11-18T18:47:13,314 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955753314 2024-11-18T18:47:13,314 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:38533, period=300000, unit=MILLISECONDS is enabled. 
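Editor's illustration, not part of the test output: InitMetaProcedure above bootstraps the 'default' and 'hbase' namespaces internally; a client creates additional namespaces through the public Admin API in essentially the same way. The namespace name 'example_ns' below is made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // 'default' and 'hbase' already exist after cluster bootstrap;
          // user namespaces are created explicitly:
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }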
2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,315 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,317 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.072sec 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:47:13,320 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:47:13,321 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:47:13,323 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:47:13,323 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:47:13,323 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38533,1731955632018-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
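Editor's sketch, not from the log: the master chores listed above (BalancerChore, RegionNormalizerChore, etc.) run on periods taken from configuration, and the "Quota support disabled" line reflects the default. The key names below are the standard ones; verify them against your HBase release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MasterChoreTuning {
      public static Configuration choreConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.balancer.period", 300000);    // BalancerChore period, ms
        conf.setInt("hbase.normalizer.period", 300000);  // RegionNormalizerChore period, ms
        conf.setBoolean("hbase.quota.enabled", true);    // enable MasterQuotaManager (off in this run)
        return conf;
      }
    }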
2024-11-18T18:47:13,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4329dd09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:13,416 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,38533,-1 for getting cluster id 2024-11-18T18:47:13,416 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:47:13,419 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '38871ccc-f2e4-46e6-8439-b3d24921d879' 2024-11-18T18:47:13,420 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:47:13,421 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "38871ccc-f2e4-46e6-8439-b3d24921d879" 2024-11-18T18:47:13,421 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fbf95e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:13,421 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,38533,-1] 2024-11-18T18:47:13,421 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:47:13,422 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:13,424 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:47:13,425 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@240a54d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:47:13,425 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:47:13,426 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,35551,1731955632182, seqNum=-1] 2024-11-18T18:47:13,427 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:47:13,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60104, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:47:13,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:13,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:13,434 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:47:13,450 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:47:13,451 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:47:13,452 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36311 2024-11-18T18:47:13,454 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36311 connecting to ZooKeeper ensemble=127.0.0.1:55514 2024-11-18T18:47:13,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:13,456 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:47:13,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:363110x0, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:47:13,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:363110x0, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-18T18:47:13,474 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-18T18:47:13,474 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36311-0x101508f59690002 connected 2024-11-18T18:47:13,475 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:47:13,480 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:47:13,481 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:47:13,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:47:13,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36311 2024-11-18T18:47:13,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36311 2024-11-18T18:47:13,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36311 2024-11-18T18:47:13,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36311 2024-11-18T18:47:13,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36311 2024-11-18T18:47:13,487 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(746): ClusterId : 38871ccc-f2e4-46e6-8439-b3d24921d879 2024-11-18T18:47:13,487 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:47:13,498 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:47:13,498 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:47:13,507 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:47:13,508 DEBUG [RS:1;39fff3b0f89c:36311 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70b85fdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:47:13,521 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;39fff3b0f89c:36311 2024-11-18T18:47:13,521 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:47:13,521 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:47:13,521 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(832): About to register with Master. 
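Editor's sketch, not part of the captured output: the RPC handler counts, the 880 MB block cache, and the MobFileCache settings printed above are all configuration-driven; the tiny handler counts are this test's values. The snippet below uses the standard key names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerTuning {
      public static Configuration rsConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 30); // RPC handler threads (this test uses 3)
        conf.setFloat("hfile.block.cache.size", 0.4f);       // heap fraction for the block cache
        conf.setInt("hbase.mob.file.cache.size", 1000);      // MobFileCache capacity (cacheSize above)
        return conf;
      }
    }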
2024-11-18T18:47:13,522 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,38533,1731955632018 with port=36311, startcode=1731955633450 2024-11-18T18:47:13,522 DEBUG [RS:1;39fff3b0f89c:36311 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:47:13,524 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55025, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:47:13,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38533 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38533 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,526 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5 2024-11-18T18:47:13,526 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44763 2024-11-18T18:47:13,526 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:47:13,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:13,537 DEBUG [RS:1;39fff3b0f89c:36311 {}] zookeeper.ZKUtil(111): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,537 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,36311,1731955633450] 2024-11-18T18:47:13,537 WARN [RS:1;39fff3b0f89c:36311 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:47:13,537 INFO [RS:1;39fff3b0f89c:36311 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:47:13,537 DEBUG [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,541 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:47:13,543 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:47:13,543 INFO [RS:1;39fff3b0f89c:36311 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:47:13,544 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
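Editor's sketch, not in the log: the FSHLogProvider, the global memstore limits (880 M high / 836 M low, i.e. 0.4 of heap with a 0.95 lower mark), and the compaction throughput bounds reported above correspond to the keys below. Key names are assumed from the standard configuration; double-check them for your release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreAndWalProvider {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                                 // FSHLogProvider
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);               // global memstore limit
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);  // low-water mark
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        return conf;
      }
    }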
2024-11-18T18:47:13,544 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:47:13,545 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:47:13,545 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,545 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,545 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,545 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:13,546 DEBUG [RS:1;39fff3b0f89c:36311 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
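Editor's sketch, not part of the test output: each RS_* executor started above is a small thread pool whose core/max size comes from configuration. The key names below are assumptions from memory; confirm them against the HRegionServer source for your version before relying on them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ExecutorPoolTuning {
      public static Configuration executorConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.executor.openregion.threads", 3);  // RS_OPEN_REGION pool (assumed key)
        conf.setInt("hbase.regionserver.executor.closeregion.threads", 3); // RS_CLOSE_REGION pool (assumed key)
        return conf;
      }
    }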
2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,547 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,36311,1731955633450-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:47:13,566 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:47:13,566 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,36311,1731955633450-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,567 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,567 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.Replication(171): 39fff3b0f89c,36311,1731955633450 started 2024-11-18T18:47:13,582 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:47:13,582 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,36311,1731955633450, RpcServer on 39fff3b0f89c/172.17.0.2:36311, sessionid=0x101508f59690002 2024-11-18T18:47:13,582 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:47:13,582 DEBUG [RS:1;39fff3b0f89c:36311 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;39fff3b0f89c:36311,5,FailOnTimeoutGroup] 2024-11-18T18:47:13,582 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,36311,1731955633450' 2024-11-18T18:47:13,582 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:47:13,582 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:47:13,583 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,36311,1731955633450' 2024-11-18T18:47:13,583 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:47:13,584 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:47:13,584 DEBUG [RS:1;39fff3b0f89c:36311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:47:13,584 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:13,584 INFO [RS:1;39fff3b0f89c:36311 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:47:13,584 INFO [RS:1;39fff3b0f89c:36311 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:47:13,584 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5bd9f479 2024-11-18T18:47:13,584 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T18:47:13,587 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T18:47:13,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T18:47:13,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
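Editor's illustration, not from the log: the two TableDescriptorChecker warnings above fire because the test deliberately sets tiny sizing values ("hbase.hregion.max.filesize" 786432, "hbase.hregion.memstore.flush.size" 8192) on the table descriptor to force frequent splits and flushes. A production-style descriptor would use values like the defaults shown below (10 GB and 128 MB are the usual defaults, not values from this test).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorSizing {
      public static TableDescriptor sizedDescriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(10L * 1024 * 1024 * 1024)      // "hbase.hregion.max.filesize": split threshold
            .setMemStoreFlushSize(128L * 1024 * 1024)      // "hbase.hregion.memstore.flush.size"
            .build();
      }
    }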
2024-11-18T18:47:13,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:47:13,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T18:47:13,591 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T18:47:13,591 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-18T18:47:13,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T18:47:13,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:47:13,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741835_1011 (size=393) 2024-11-18T18:47:13,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741835_1011 (size=393) 2024-11-18T18:47:13,602 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => abb47c13756aec252f5492952169050f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5 2024-11-18T18:47:13,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41919 is added to blk_1073741836_1012 (size=76) 2024-11-18T18:47:13,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38853 is added to blk_1073741836_1012 (size=76) 2024-11-18T18:47:13,610 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:13,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing abb47c13756aec252f5492952169050f, disabling compactions & flushes 2024-11-18T18:47:13,610 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. after waiting 0 ms 2024-11-18T18:47:13,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,610 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,610 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for abb47c13756aec252f5492952169050f: Waiting for close lock at 1731955633610Disabling compacts and flushes for region at 1731955633610Disabling writes for close at 1731955633610Writing region close event to WAL at 1731955633610Closed at 1731955633610 2024-11-18T18:47:13,612 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T18:47:13,612 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731955633612"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955633612"}]},"ts":"1731955633612"} 2024-11-18T18:47:13,615 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T18:47:13,616 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T18:47:13,617 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955633616"}]},"ts":"1731955633616"} 2024-11-18T18:47:13,619 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-18T18:47:13,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=abb47c13756aec252f5492952169050f, ASSIGN}] 2024-11-18T18:47:13,621 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=abb47c13756aec252f5492952169050f, ASSIGN 2024-11-18T18:47:13,623 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=abb47c13756aec252f5492952169050f, ASSIGN; state=OFFLINE, location=39fff3b0f89c,35551,1731955632182; forceNewPlan=false, retain=false 2024-11-18T18:47:13,686 INFO [RS:1;39fff3b0f89c:36311 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C36311%2C1731955633450, suffix=, logDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450, archiveDir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs, maxLogs=32 2024-11-18T18:47:13,687 INFO [RS:1;39fff3b0f89c:36311 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C36311%2C1731955633450.1731955633687 2024-11-18T18:47:13,694 INFO [RS:1;39fff3b0f89c:36311 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 2024-11-18T18:47:13,696 DEBUG [RS:1;39fff3b0f89c:36311 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44119:44119),(127.0.0.1/127.0.0.1:40081:40081)] 2024-11-18T18:47:13,773 INFO [39fff3b0f89c:38533 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
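Editor's sketch, not part of the captured output: the WAL parameters echoed further down (blocksize=256 MB, rollsize=128 MB, maxLogs=32) map onto standard configuration keys, with rollsize derived as blocksize times the logroll multiplier (256 MB * 0.5 = 128 MB here).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalTuning {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // WAL files before forced flush
        conf.setLong("hbase.regionserver.logroll.period", 3600000L);           // time-based roll, 1 h
        return conf;
      }
    }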
2024-11-18T18:47:13,774 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=abb47c13756aec252f5492952169050f, regionState=OPENING, regionLocation=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:13,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=abb47c13756aec252f5492952169050f, ASSIGN because future has completed 2024-11-18T18:47:13,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure abb47c13756aec252f5492952169050f, server=39fff3b0f89c,35551,1731955632182}] 2024-11-18T18:47:13,935 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,935 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => abb47c13756aec252f5492952169050f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:47:13,936 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,936 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:47:13,936 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,937 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,939 INFO [StoreOpener-abb47c13756aec252f5492952169050f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,940 INFO [StoreOpener-abb47c13756aec252f5492952169050f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region abb47c13756aec252f5492952169050f columnFamilyName info 2024-11-18T18:47:13,941 DEBUG [StoreOpener-abb47c13756aec252f5492952169050f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:47:13,941 INFO [StoreOpener-abb47c13756aec252f5492952169050f-1 {}] regionserver.HStore(327): Store=abb47c13756aec252f5492952169050f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:47:13,941 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,942 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,943 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,943 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,944 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,945 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,948 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:47:13,949 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened abb47c13756aec252f5492952169050f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=758439, jitterRate=-0.03559616208076477}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:47:13,949 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for abb47c13756aec252f5492952169050f 2024-11-18T18:47:13,950 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for abb47c13756aec252f5492952169050f: Running coprocessor pre-open hook at 1731955633937Writing region info on filesystem at 1731955633937Initializing all the Stores at 1731955633938 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955633938Cleaning up temporary data from old regions at 1731955633944 (+6 ms)Running coprocessor post-open hooks at 1731955633949 (+5 ms)Region opened successfully at 1731955633949 2024-11-18T18:47:13,951 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f., pid=6, masterSystemTime=1731955633931 2024-11-18T18:47:13,954 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,954 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:13,955 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=abb47c13756aec252f5492952169050f, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:13,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure abb47c13756aec252f5492952169050f, server=39fff3b0f89c,35551,1731955632182 because future has completed 2024-11-18T18:47:13,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T18:47:13,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure abb47c13756aec252f5492952169050f, server=39fff3b0f89c,35551,1731955632182 in 182 msec 2024-11-18T18:47:13,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T18:47:13,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=abb47c13756aec252f5492952169050f, ASSIGN in 342 msec 2024-11-18T18:47:13,967 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T18:47:13,968 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955633967"}]},"ts":"1731955633967"} 2024-11-18T18:47:13,970 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-18T18:47:13,972 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T18:47:13,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 384 msec 2024-11-18T18:47:18,735 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:47:18,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:18,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:18,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:18,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:18,771 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-18T18:47:19,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T18:47:19,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-18T18:47:23,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:47:23,650 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-18T18:47:23,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-18T18:47:23,655 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T18:47:23,655 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:23,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:23,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:23,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:23,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:23,676 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:23,677 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45b83ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:23,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@411972d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:23,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f373428{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-42853-hadoop-hdfs-3_4_1-tests_jar-_-any-11308553927180005221/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:23,787 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60ec384f{HTTP/1.1, (http/1.1)}{localhost:42853} 2024-11-18T18:47:23,787 INFO [Time-limited test {}] server.Server(415): Started @121176ms 2024-11-18T18:47:23,788 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:23,827 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:23,830 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:23,831 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:23,831 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:23,831 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:23,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d39bcd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:23,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e04641a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:23,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de54521{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-45173-hadoop-hdfs-3_4_1-tests_jar-_-any-216789640891168077/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:23,942 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16e41538{HTTP/1.1, (http/1.1)}{localhost:45173} 2024-11-18T18:47:23,942 INFO [Time-limited test {}] server.Server(415): Started @121331ms 2024-11-18T18:47:23,944 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:23,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:23,994 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:23,995 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:23,995 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:23,995 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:23,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a60480b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:23,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c0171f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:24,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f9f962a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-34781-hadoop-hdfs-3_4_1-tests_jar-_-any-1254408462825465149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:24,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc14ff5{HTTP/1.1, (http/1.1)}{localhost:34781} 2024-11-18T18:47:24,110 INFO [Time-limited test {}] server.Server(415): Started @121499ms 2024-11-18T18:47:24,111 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:24,753 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:24,753 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:24,774 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:24,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x37d0935173a31cd4 with lease ID 0x7a03a54b6d62c316: Processing first storage report for DS-412d2716-6e79-4d32-b104-cd57bff1b69d from datanode DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:24,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37d0935173a31cd4 with lease ID 0x7a03a54b6d62c316: from storage DS-412d2716-6e79-4d32-b104-cd57bff1b69d node DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:24,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x37d0935173a31cd4 with lease ID 0x7a03a54b6d62c316: Processing first storage report for DS-0b9fd0ab-ceb0-4c85-af19-caf95b5fd187 from datanode DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:24,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x37d0935173a31cd4 with lease ID 0x7a03a54b6d62c316: from storage DS-0b9fd0ab-ceb0-4c85-af19-caf95b5fd187 node DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:25,006 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data7/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:25,006 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data8/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:25,027 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bb5444518742e1c with lease ID 0x7a03a54b6d62c317: Processing first storage report for DS-352928c3-29d3-46a8-9e52-55ea5eae6536 from datanode DatanodeRegistration(127.0.0.1:32877, datanodeUuid=37423efd-9aaf-41a5-9170-73afd3458851, infoPort=40097, infoSecurePort=0, ipcPort=36957, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bb5444518742e1c with lease ID 0x7a03a54b6d62c317: from storage DS-352928c3-29d3-46a8-9e52-55ea5eae6536 node DatanodeRegistration(127.0.0.1:32877, datanodeUuid=37423efd-9aaf-41a5-9170-73afd3458851, infoPort=40097, infoSecurePort=0, ipcPort=36957, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bb5444518742e1c with lease ID 0x7a03a54b6d62c317: Processing first storage report for DS-924ffa62-886f-4ec4-bcdc-025c5e22b706 from datanode DatanodeRegistration(127.0.0.1:32877, datanodeUuid=37423efd-9aaf-41a5-9170-73afd3458851, infoPort=40097, infoSecurePort=0, ipcPort=36957, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bb5444518742e1c with lease ID 0x7a03a54b6d62c317: from storage DS-924ffa62-886f-4ec4-bcdc-025c5e22b706 node DatanodeRegistration(127.0.0.1:32877, datanodeUuid=37423efd-9aaf-41a5-9170-73afd3458851, infoPort=40097, infoSecurePort=0, ipcPort=36957, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:25,116 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data9/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:25,116 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data10/current/BP-1625265116-172.17.0.2-1731955630168/current, will proceed with Du for space computation calculation, 2024-11-18T18:47:25,135 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:25,138 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11dfbd9653b2e381 with lease ID 0x7a03a54b6d62c318: Processing first storage report for DS-9d27af4d-d90a-4dfa-bc02-b91d74251180 from datanode DatanodeRegistration(127.0.0.1:35367, datanodeUuid=54250b8e-048e-4739-bcc7-7a07dfff03ae, infoPort=36651, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:25,138 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11dfbd9653b2e381 with lease ID 0x7a03a54b6d62c318: from storage DS-9d27af4d-d90a-4dfa-bc02-b91d74251180 node DatanodeRegistration(127.0.0.1:35367, datanodeUuid=54250b8e-048e-4739-bcc7-7a07dfff03ae, infoPort=36651, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:25,138 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,138 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,138 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,138 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:25,139 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:25,139 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta block BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:25,138 WARN [PacketResponder: BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38853] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:25,140 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:25,140 WARN [PacketResponder: BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38853] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:50514 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41919:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50514 dst: /127.0.0.1:41919 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11dfbd9653b2e381 with lease ID 0x7a03a54b6d62c318: Processing first storage report for DS-3365353c-2c7e-47cc-9287-eade3b032e93 from datanode DatanodeRegistration(127.0.0.1:35367, datanodeUuid=54250b8e-048e-4739-bcc7-7a07dfff03ae, infoPort=36651, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168) 2024-11-18T18:47:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11dfbd9653b2e381 with lease ID 0x7a03a54b6d62c318: from storage DS-3365353c-2c7e-47cc-9287-eade3b032e93 node DatanodeRegistration(127.0.0.1:35367, datanodeUuid=54250b8e-048e-4739-bcc7-7a07dfff03ae, infoPort=36651, infoSecurePort=0, ipcPort=33477, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:25,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1592602569_22 at /127.0.0.1:44004 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44004 dst: /127.0.0.1:38853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50544 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41919:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50544 dst: /127.0.0.1:41919 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:43982 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43982 dst: /127.0.0.1:38853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,141 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:25,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:43944 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43944 dst: /127.0.0.1:38853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1592602569_22 at /127.0.0.1:50586 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41919:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50586 dst: /127.0.0.1:41919 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:43968 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38853:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43968 dst: /127.0.0.1:38853 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50556 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41919:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50556 dst: /127.0.0.1:41919 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:25,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e7c1fd9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:25,144 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2070bd8d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:25,144 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:25,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74ca9210{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:25,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7acad2a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:25,146 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:25,146 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T18:47:25,146 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid 056ed252-36c4-4fce-a591-0395ffe8d5bd) service to localhost/127.0.0.1:44763 2024-11-18T18:47:25,146 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:25,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:25,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:25,147 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:25,147 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta block BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,147 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:25,149 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,149 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@798a0757 {}] datanode.DataXceiver(331): 127.0.0.1:41919:DataXceiver error processing unknown operation src: /127.0.0.1:50718 dst: /127.0.0.1:41919 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:25,149 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32694da{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:25,151 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@150beccb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:25,151 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:25,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@172c9107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:25,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198a2712{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:25,152 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:25,152 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:25,152 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid fd6f6d6d-f2c4-49d8-ba95-650f6c3f539d) service to localhost/127.0.0.1:44763 2024-11-18T18:47:25,152 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:25,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data1/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:25,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data2/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:25,153 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:25,157 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f., hostname=39fff3b0f89c,35551,1731955632182, seqNum=2] 2024-11-18T18:47:25,159 ERROR [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5-prefix:39fff3b0f89c,35551,1731955632182 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,159 WARN [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5-prefix:39fff3b0f89c,35551,1731955632182 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,159 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C35551%2C1731955632182:(num 1731955632822) roll requested 2024-11-18T18:47:25,160 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955645159 2024-11-18T18:47:25,163 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,163 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:25,163 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741838_1018 2024-11-18T18:47:25,166 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:25,180 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:25,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:25,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:25,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:25,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:25,181 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 2024-11-18T18:47:25,181 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,181 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:25,182 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-18T18:47:25,182 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36651:36651),(127.0.0.1/127.0.0.1:40097:40097)] 2024-11-18T18:47:25,183 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:25,183 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-18T18:47:25,183 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 2024-11-18T18:47:25,186 WARN [IPC Server handler 2 on default port 44763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-18T18:47:25,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 after 5ms 2024-11-18T18:47:25,548 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:26,304 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:27,183 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:27,184 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 2024-11-18T18:47:27,185 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:27,185 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:27,185 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:36926 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:35367:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36926 dst: /127.0.0.1:35367 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:27,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:40030 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:32877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40030 dst: /127.0.0.1:32877 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:27,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f9f962a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:27,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc14ff5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:27,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:27,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c0171f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:27,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a60480b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:27,202 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:27,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:27,202 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid 54250b8e-048e-4739-bcc7-7a07dfff03ae) service to localhost/127.0.0.1:44763 2024-11-18T18:47:27,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:27,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data9/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:27,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data10/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:27,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:27,548 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:28,304 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:29,184 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]] 2024-11-18T18:47:29,184 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:29,184 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C35551%2C1731955632182:(num 1731955645159) roll requested 2024-11-18T18:47:29,184 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955649184 2024-11-18T18:47:29,189 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35367 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:29,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:40042 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data8]'}, localName='127.0.0.1:32877', datanodeUuid='37423efd-9aaf-41a5-9170-73afd3458851', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022 to mirror 127.0.0.1:35367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:29,189 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:29,189 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:40042 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T18:47:29,189 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022 2024-11-18T18:47:29,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:40042 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:32877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40042 dst: /127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:29,190 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:29,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 after 4008ms 2024-11-18T18:47:29,193 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41919 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:29,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45256 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023 to mirror 127.0.0.1:41919 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:29,193 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:29,193 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023 2024-11-18T18:47:29,194 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45256 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T18:47:29,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45256 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45256 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:29,194 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:29,207 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T18:47:29,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:29,209 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:29,209 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:29,209 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:29,209 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:29,209 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 2024-11-18T18:47:29,210 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40245:40245),(127.0.0.1/127.0.0.1:40097:40097)] 2024-11-18T18:47:29,210 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:29,210 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 is not closed yet, will try archiving it next time 2024-11-18T18:47:29,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741839_1021 (size=2431) 2024-11-18T18:47:29,212 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:29,548 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:30,305 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,210 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,212 WARN [ResponseProcessor for block BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,212 WARN [DataStreamer for file /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 block BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:31,212 WARN [PacketResponder: BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:32877] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,213 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45266 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45266 dst: /127.0.0.1:46229 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,213 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:40058 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40058 dst: /127.0.0.1:32877 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de54521{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:31,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16e41538{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:31,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:31,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e04641a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:31,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d39bcd7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:31,262 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:31,262 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:47:31,262 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:31,262 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid 37423efd-9aaf-41a5-9170-73afd3458851) service to localhost/127.0.0.1:44763 2024-11-18T18:47:31,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data7/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:31,264 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data8/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:31,264 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:31,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:31,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:47:31,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6eaa20c2f51245ffb3e92b24a93b762c is 1080, key is row0002/info:/1731955647204/Put/seqid=0 2024-11-18T18:47:31,297 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:31,297 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:31,297 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741843_1026 2024-11-18T18:47:31,298 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:31,299 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,300 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:31,300 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741844_1027 2024-11-18T18:47:31,300 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:31,302 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,302 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:31,302 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741845_1028 2024-11-18T18:47:31,303 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:31,304 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,304 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 
2024-11-18T18:47:31,304 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741846_1029 2024-11-18T18:47:31,305 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:31,306 WARN [IPC Server handler 4 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:31,306 WARN [IPC Server handler 4 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:31,306 WARN [IPC Server handler 4 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:31,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741847_1030 (size=10347) 2024-11-18T18:47:31,549 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:31,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6eaa20c2f51245ffb3e92b24a93b762c 2024-11-18T18:47:31,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6eaa20c2f51245ffb3e92b24a93b762c as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c 2024-11-18T18:47:31,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c, entries=5, sequenceid=11, filesize=10.1 K 2024-11-18T18:47:31,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for abb47c13756aec252f5492952169050f in 455ms, sequenceid=11, compaction requested=false 2024-11-18T18:47:31,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:31,790 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7544f40b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741847_1030 to 127.0.0.1:35367 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:31,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-18T18:47:31,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/1b9a99d7650444aabbf943c1802c1e02 is 1080, key is row0007/info:/1731955651276/Put/seqid=0 2024-11-18T18:47:31,910 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,911 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:31,911 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741848_1031 2024-11-18T18:47:31,912 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:31,915 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41919 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:31,915 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45318 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032 to mirror 127.0.0.1:41919 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,915 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:31,915 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032 2024-11-18T18:47:31,916 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45318 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:31,916 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45318 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45318 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,916 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:31,918 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:31,918 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:31,918 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741850_1033 2024-11-18T18:47:31,918 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:31,921 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35367 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:31,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034 to mirror 127.0.0.1:35367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,921 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:31,921 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:31,921 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034 2024-11-18T18:47:31,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45334 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:31,922 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:31,922 WARN [IPC Server handler 0 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:31,922 WARN [IPC Server handler 0 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:31,922 WARN [IPC Server handler 0 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:31,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741852_1035 (size=12506) 2024-11-18T18:47:32,305 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:32,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/1b9a99d7650444aabbf943c1802c1e02 2024-11-18T18:47:32,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/1b9a99d7650444aabbf943c1802c1e02 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02 2024-11-18T18:47:32,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02, entries=7, sequenceid=24, filesize=12.2 K 2024-11-18T18:47:32,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for abb47c13756aec252f5492952169050f in 443ms, sequenceid=24, compaction requested=false 2024-11-18T18:47:32,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:32,346 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-18T18:47:32,346 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:32,346 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02 because midkey is the same as first or last row 2024-11-18T18:47:33,211 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]] 2024-11-18T18:47:33,211 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:33,211 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C35551%2C1731955632182:(num 1731955649184) roll requested 2024-11-18T18:47:33,212 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955653211 2024-11-18T18:47:33,214 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,214 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:33,214 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741853_1036 2024-11-18T18:47:33,215 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:33,216 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:33,216 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:33,216 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741854_1037 2024-11-18T18:47:33,217 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:33,219 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38853 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45356 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038 to mirror 127.0.0.1:38853 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:33,219 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:33,219 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45356 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T18:47:33,219 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038 2024-11-18T18:47:33,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45356 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45356 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,220 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:33,221 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:33,222 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:33,222 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741856_1039 2024-11-18T18:47:33,222 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:33,223 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:33,223 WARN [IPC Server handler 2 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:33,223 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:33,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:33,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:33,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:33,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:33,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:33,226 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955653211 2024-11-18T18:47:33,227 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40245:40245)] 2024-11-18T18:47:33,227 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 
2024-11-18T18:47:33,227 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 is not closed yet, will try archiving it next time 2024-11-18T18:47:33,227 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C35551%2C1731955632182.1731955645159 2024-11-18T18:47:33,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741842_1025 (size=25992) 2024-11-18T18:47:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:33,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T18:47:33,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/2c7f246abc164aa1a687e6ed4da8afb0 is 1079, key is tmprow/info:/1731955653323/Put/seqid=0 2024-11-18T18:47:33,333 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,333 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 
2024-11-18T18:47:33,333 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741858_1041 2024-11-18T18:47:33,334 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:33,337 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35367 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45376 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042 to mirror 127.0.0.1:35367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,337 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 
2024-11-18T18:47:33,337 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042 2024-11-18T18:47:33,337 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45376 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:33,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45376 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45376 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,338 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:33,340 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,340 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 
2024-11-18T18:47:33,340 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741860_1043 2024-11-18T18:47:33,341 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:33,343 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,344 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 
2024-11-18T18:47:33,344 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741861_1044 2024-11-18T18:47:33,344 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:33,345 WARN [IPC Server handler 1 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:33,345 WARN [IPC Server handler 1 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:33,345 WARN [IPC Server handler 1 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:33,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741862_1045 (size=6027) 2024-11-18T18:47:33,549 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:33,629 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:33,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/2c7f246abc164aa1a687e6ed4da8afb0 2024-11-18T18:47:33,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/2c7f246abc164aa1a687e6ed4da8afb0 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0 2024-11-18T18:47:33,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0, entries=1, sequenceid=34, filesize=5.9 K 2024-11-18T18:47:33,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for abb47c13756aec252f5492952169050f in 442ms, sequenceid=34, compaction requested=true 2024-11-18T18:47:33,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:33,767 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-18T18:47:33,767 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:33,767 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02 because midkey is the same as first or last row 2024-11-18T18:47:33,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store abb47c13756aec252f5492952169050f:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:47:33,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:47:33,768 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:47:33,770 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:47:33,770 DEBUG 
[RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1541): abb47c13756aec252f5492952169050f/info is initiating minor compaction (all files) 2024-11-18T18:47:33,770 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of abb47c13756aec252f5492952169050f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:33,770 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0] into tmpdir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp, totalSize=28.2 K 2024-11-18T18:47:33,771 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6eaa20c2f51245ffb3e92b24a93b762c, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731955647204 2024-11-18T18:47:33,771 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b9a99d7650444aabbf943c1802c1e02, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731955651276 2024-11-18T18:47:33,772 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c7f246abc164aa1a687e6ed4da8afb0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731955653323 2024-11-18T18:47:33,789 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): abb47c13756aec252f5492952169050f#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:47:33,790 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/ff840ea2eeff442bbfa2e85b63ddf795 is 1080, key is row0002/info:/1731955647204/Put/seqid=0 2024-11-18T18:47:33,792 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,792 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:33,792 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741863_1046 2024-11-18T18:47:33,793 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:33,795 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,795 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45426 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,795 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:33,796 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047 2024-11-18T18:47:33,796 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45426 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:33,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45426 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45426 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,796 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:33,798 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38853 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,798 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45432 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048 to mirror 127.0.0.1:38853 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,799 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:33,799 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048 2024-11-18T18:47:33,799 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45432 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:33,799 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45432 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45432 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,799 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:33,802 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35367 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:33,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45446 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049 to mirror 127.0.0.1:35367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:33,802 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:33,802 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049 2024-11-18T18:47:33,802 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45446 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:33,802 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:45446 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45446 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:33,803 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:33,804 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:33,804 WARN [IPC Server handler 2 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:33,804 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:33,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741867_1050 (size=17994) 2024-11-18T18:47:34,219 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/ff840ea2eeff442bbfa2e85b63ddf795 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 2024-11-18T18:47:34,228 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in abb47c13756aec252f5492952169050f/info of abb47c13756aec252f5492952169050f into ff840ea2eeff442bbfa2e85b63ddf795(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:34,228 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f., storeName=abb47c13756aec252f5492952169050f/info, priority=13, startTime=1731955653768; duration=0sec 2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 because midkey is the same as first or last row 2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T18:47:34,228 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 because midkey is the same as first or last row 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 because midkey is the same as first or last row 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:47:34,229 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: abb47c13756aec252f5492952169050f:info 2024-11-18T18:47:34,306 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:34,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:34,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T18:47:34,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/29ee239dd590449b84f531ae9fa7ee5d is 1079, key is tmprow/info:/1731955654745/Put/seqid=0 2024-11-18T18:47:34,753 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:34,753 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:34,753 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741868_1051 2024-11-18T18:47:34,754 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:34,756 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:34,756 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:34,756 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741869_1052 2024-11-18T18:47:34,757 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:34,759 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:34,759 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:34,759 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741870_1053 2024-11-18T18:47:34,759 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:34,763 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:34,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50276 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:34,763 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:34,763 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054 2024-11-18T18:47:34,763 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50276 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:34,763 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50276 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50276 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:34,764 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:34,764 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:34,765 WARN [IPC Server handler 3 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:34,765 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:34,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741872_1055 (size=6027) 2024-11-18T18:47:34,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7544f40b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741852_1035 to 127.0.0.1:38853 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:34,778 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c821269[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741842_1025 to 127.0.0.1:32877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:35,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/29ee239dd590449b84f531ae9fa7ee5d 2024-11-18T18:47:35,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/29ee239dd590449b84f531ae9fa7ee5d as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d 2024-11-18T18:47:35,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d, entries=1, sequenceid=45, filesize=5.9 K 2024-11-18T18:47:35,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for abb47c13756aec252f5492952169050f in 441ms, sequenceid=45, compaction requested=false 2024-11-18T18:47:35,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:35,187 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-18T18:47:35,187 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:35,187 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 because midkey is the same as first or last row 2024-11-18T18:47:35,227 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]] 2024-11-18T18:47:35,228 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:35,228 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C35551%2C1731955632182:(num 1731955653211) roll requested 2024-11-18T18:47:35,228 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955655228 2024-11-18T18:47:35,231 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:35,231 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:35,231 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741873_1056 2024-11-18T18:47:35,232 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:35,234 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:35,234 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:35,234 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741874_1057 2024-11-18T18:47:35,234 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:35,236 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:35,237 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:35,237 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741875_1058 2024-11-18T18:47:35,237 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:35,239 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:35,239 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:35,239 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741876_1059 2024-11-18T18:47:35,240 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:35,241 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:35,241 WARN [IPC Server handler 3 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:35,241 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:35,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:35,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:35,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:35,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:35,248 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:35,249 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955653211 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955655228 2024-11-18T18:47:35,250 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40245:40245)] 2024-11-18T18:47:35,250 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:35,250 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955653211 is not closed yet, will try archiving it next time 2024-11-18T18:47:35,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741857_1040 (size=13591) 2024-11-18T18:47:35,251 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C35551%2C1731955632182.1731955649184 2024-11-18T18:47:35,549 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:35,651 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 is not closed yet, will try archiving it next time 2024-11-18T18:47:35,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7544f40b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741867_1050 to 127.0.0.1:32877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:35,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c821269[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741862_1045 to 127.0.0.1:35367 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:36,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T18:47:36,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6a399a3af82f4bd9ba74ce2c6cd5163e is 1079, key is tmprow/info:/1731955656164/Put/seqid=0 2024-11-18T18:47:36,174 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38853 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:36,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50298 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061 to mirror 127.0.0.1:38853 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,174 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:36,174 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061 2024-11-18T18:47:36,174 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50298 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:36,174 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50298 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50298 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,175 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:36,176 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,176 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:36,176 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741879_1062 2024-11-18T18:47:36,177 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:36,178 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,179 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:36,179 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741880_1063 2024-11-18T18:47:36,179 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:36,182 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50306 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,182 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:36,182 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064 2024-11-18T18:47:36,182 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50306 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:36,182 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50306 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50306 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:36,183 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:36,184 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:36,184 WARN [IPC Server handler 2 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:36,184 WARN [IPC Server handler 2 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:36,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741882_1065 (size=6027) 2024-11-18T18:47:36,306 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:36,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6a399a3af82f4bd9ba74ce2c6cd5163e 2024-11-18T18:47:36,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/6a399a3af82f4bd9ba74ce2c6cd5163e as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e 2024-11-18T18:47:36,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e, entries=1, sequenceid=55, filesize=5.9 K 2024-11-18T18:47:36,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for abb47c13756aec252f5492952169050f in 439ms, sequenceid=55, compaction requested=true 2024-11-18T18:47:36,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:36,605 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-18T18:47:36,605 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:36,605 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 because midkey is the same as first or last row 2024-11-18T18:47:36,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store abb47c13756aec252f5492952169050f:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:47:36,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:47:36,606 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:47:36,607 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:47:36,607 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1541): abb47c13756aec252f5492952169050f/info is initiating minor compaction (all files) 2024-11-18T18:47:36,607 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
abb47c13756aec252f5492952169050f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:36,608 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e] into tmpdir=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp, totalSize=29.3 K 2024-11-18T18:47:36,608 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff840ea2eeff442bbfa2e85b63ddf795, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731955647204 2024-11-18T18:47:36,609 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29ee239dd590449b84f531ae9fa7ee5d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731955654745 2024-11-18T18:47:36,609 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a399a3af82f4bd9ba74ce2c6cd5163e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731955656164 2024-11-18T18:47:36,628 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): abb47c13756aec252f5492952169050f#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:47:36,628 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/342e08a57fcd40c4b14915ca2a676c8e is 1080, key is row0002/info:/1731955647204/Put/seqid=0 2024-11-18T18:47:36,630 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,630 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:36,630 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741883_1066 2024-11-18T18:47:36,631 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:36,633 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38853 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50322 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067 to mirror 127.0.0.1:38853 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,634 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]) is bad. 2024-11-18T18:47:36,634 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067 2024-11-18T18:47:36,634 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50322 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:36,634 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50322 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50322 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,634 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38853,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK] 2024-11-18T18:47:36,637 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35367 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:36,637 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068 to mirror 127.0.0.1:35367 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,637 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:36,637 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068 2024-11-18T18:47:36,637 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:36,637 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50334 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50334 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:36,638 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:36,640 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41919 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:36,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50338 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069 to mirror 127.0.0.1:41919 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:36,641 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:36,641 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069 2024-11-18T18:47:36,641 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50338 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:36,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:50338 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50338 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:36,641 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:36,642 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T18:47:36,642 WARN [IPC Server handler 3 on default port 44763 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T18:47:36,642 WARN [IPC Server handler 3 on default port 44763 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T18:47:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741887_1070 (size=18097) 2024-11-18T18:47:36,653 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/342e08a57fcd40c4b14915ca2a676c8e as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/342e08a57fcd40c4b14915ca2a676c8e 2024-11-18T18:47:36,662 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in abb47c13756aec252f5492952169050f/info of abb47c13756aec252f5492952169050f into 342e08a57fcd40c4b14915ca2a676c8e(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T18:47:36,662 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:36,662 INFO [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f., storeName=abb47c13756aec252f5492952169050f/info, priority=13, startTime=1731955656605; duration=0sec 2024-11-18T18:47:36,662 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T18:47:36,662 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/342e08a57fcd40c4b14915ca2a676c8e because midkey is the same as first or last row 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/342e08a57fcd40c4b14915ca2a676c8e because midkey is the same as first or last row 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/342e08a57fcd40c4b14915ca2a676c8e because midkey is the same as first or last row 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:47:36,663 DEBUG [RS:0;39fff3b0f89c:35551-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: abb47c13756aec252f5492952169050f:info 2024-11-18T18:47:37,251 WARN [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-18T18:47:37,251 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:37,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:37,397 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:37,398 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:37,398 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:37,398 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:37,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f9cc2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:37,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27dc8a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:37,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d3b3ece{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/java.io.tmpdir/jetty-localhost-43307-hadoop-hdfs-3_4_1-tests_jar-_-any-845034408675694003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:37,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c1d96{HTTP/1.1, (http/1.1)}{localhost:43307} 2024-11-18T18:47:37,512 INFO [Time-limited test {}] server.Server(415): Started @134901ms 2024-11-18T18:47:37,513 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:37,550 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:37,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c821269[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741857_1040 to 127.0.0.1:41919 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:37,777 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7544f40b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46229, datanodeUuid=a01454de-06ee-4862-bd8f-d790f28eb598, infoPort=40245, infoSecurePort=0, ipcPort=39369, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741872_1055 to 127.0.0.1:32877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:37,935 WARN [Thread-992 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:47:37,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e533e6f7f2685a5 with lease ID 0x7a03a54b6d62c319: from storage DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5 node DatanodeRegistration(127.0.0.1:41501, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=40327, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:37,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6e533e6f7f2685a5 with lease ID 0x7a03a54b6d62c319: from storage DS-c879dd95-44e2-47ea-a3ee-fc8171daefce node DatanodeRegistration(127.0.0.1:41501, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=40327, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:47:38,307 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:38,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741887_1070 (size=18097) 2024-11-18T18:47:38,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741882_1065 (size=6027) 2024-11-18T18:47:39,251 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:39,550 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:40,307 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:41,252 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:41,550 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:41,997 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T18:47:42,308 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,528 ERROR [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData-prefix:39fff3b0f89c,38533,1731955632018 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,529 WARN [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData-prefix:39fff3b0f89c,38533,1731955632018 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,529 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C38533%2C1731955632018:(num 1731955632334) roll requested 2024-11-18T18:47:42,529 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38533%2C1731955632018.1731955662529 2024-11-18T18:47:42,533 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,534 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK]) is bad. 2024-11-18T18:47:42,534 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741888_1071 2024-11-18T18:47:42,535 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35367,DS-9d27af4d-d90a-4dfa-bc02-b91d74251180,DISK] 2024-11-18T18:47:42,537 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,537 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK], DatanodeInfoWithStorage[127.0.0.1:41501,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]) is bad. 2024-11-18T18:47:42,537 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741889_1072 2024-11-18T18:47:42,538 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK] 2024-11-18T18:47:42,541 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:42,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:58856 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4]'}, localName='127.0.0.1:41501', datanodeUuid='056ed252-36c4-4fce-a591-0395ffe8d5bd', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:42,541 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41501,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:42,541 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073 2024-11-18T18:47:42,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:58856 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T18:47:42,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:58856 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741890_1073] {}] datanode.DataXceiver(331): 127.0.0.1:41501:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58856 dst: /127.0.0.1:41501 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:42,542 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:42,547 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:42,547 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:42,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:42,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:42,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:42,548 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955662529 2024-11-18T18:47:42,548 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:42,548 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:42,549 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 2024-11-18T18:47:42,549 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40245:40245),(127.0.0.1/127.0.0.1:40327:40327)] 2024-11-18T18:47:42,549 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 is not closed yet, will try archiving it next time 2024-11-18T18:47:42,549 WARN [IPC Server handler 3 on default port 44763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741830_1006 2024-11-18T18:47:42,550 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 after 1ms 2024-11-18T18:47:43,252 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:43,551 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:45,253 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:45,551 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:46,551 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 after 4002ms 2024-11-18T18:47:47,253 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:47,552 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:47,959 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1605c33c {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41919,null,null]) java.net.ConnectException: Call From 39fff3b0f89c/172.17.0.2 to localhost:46443 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T18:47:47,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741833_1020 (size=455) 2024-11-18T18:47:48,212 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C35551%2C1731955632182.1731955632822 2024-11-18T18:47:48,214 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955653211 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C35551%2C1731955632182.1731955653211 2024-11-18T18:47:49,253 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:49,552 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741833_1020 (size=455) 2024-11-18T18:47:51,065 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.1731955671065 2024-11-18T18:47:51,069 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,069 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:43036 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4]'}, localName='127.0.0.1:41501', datanodeUuid='056ed252-36c4-4fce-a591-0395ffe8d5bd', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:51,070 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41501,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 
2024-11-18T18:47:51,070 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:43036 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T18:47:51,070 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076 2024-11-18T18:47:51,070 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1425183507_22 at /127.0.0.1:43036 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741892_1076] {}] datanode.DataXceiver(331): 127.0.0.1:41501:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43036 dst: /127.0.0.1:41501 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:51,071 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:51,078 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,078 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,078 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,078 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,078 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,078 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955655228 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955671065 2024-11-18T18:47:51,079 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40245:40245),(127.0.0.1/127.0.0.1:40327:40327)] 2024-11-18T18:47:51,079 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955655228 is not closed yet, will try archiving it next time 2024-11-18T18:47:51,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741877_1060 (size=12911) 2024-11-18T18:47:51,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35551 {}] regionserver.HRegion(8855): Flush requested on abb47c13756aec252f5492952169050f 2024-11-18T18:47:51,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T18:47:51,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/544dd23bcc5c446fa5487e8859d4ef06 is 1080, key is row0013/info:/1731955671081/Put/seqid=0 2024-11-18T18:47:51,094 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:47:51,093 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:32944 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6]'}, localName='127.0.0.1:46229', datanodeUuid='a01454de-06ee-4862-bd8f-d790f28eb598', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:51,094 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 2024-11-18T18:47:51,094 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078 2024-11-18T18:47:51,094 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:32944 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:51,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:32944 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:46229:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32944 dst: /127.0.0.1:46229 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:51,095 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:51,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741895_1079 (size=8190) 2024-11-18T18:47:51,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741895_1079 (size=8190) 2024-11-18T18:47:51,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/544dd23bcc5c446fa5487e8859d4ef06 2024-11-18T18:47:51,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/544dd23bcc5c446fa5487e8859d4ef06 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/544dd23bcc5c446fa5487e8859d4ef06 2024-11-18T18:47:51,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/544dd23bcc5c446fa5487e8859d4ef06, entries=3, sequenceid=66, filesize=8.0 K 2024-11-18T18:47:51,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for abb47c13756aec252f5492952169050f in 38ms, sequenceid=66, compaction requested=false 2024-11-18T18:47:51,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for abb47c13756aec252f5492952169050f: 2024-11-18T18:47:51,122 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-18T18:47:51,122 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:47:51,122 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/342e08a57fcd40c4b14915ca2a676c8e because midkey is the same as first or last row 2024-11-18T18:47:51,254 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(556): 
LowReplication-Roller was enabled. 2024-11-18T18:47:51,254 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:47:51,308 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:47:51,309 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:51,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:51,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:51,309 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:47:51,309 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:47:51,309 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1809187819, stopped=false 2024-11-18T18:47:51,309 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,38533,1731955632018 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:51,363 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:51,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:51,363 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:47:51,363 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:51,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:51,364 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,35551,1731955632182' ***** 2024-11-18T18:47:51,364 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:47:51,364 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:51,364 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,36311,1731955633450' ***** 2024-11-18T18:47:51,364 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:47:51,364 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:47:51,364 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:47:51,364 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:51,364 INFO [RS:0;39fff3b0f89c:35551 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:47:51,364 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:51,364 INFO [RS:0;39fff3b0f89c:35551 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:47:51,364 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;39fff3b0f89c:36311. 
2024-11-18T18:47:51,364 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(3091): Received CLOSE for abb47c13756aec252f5492952169050f 2024-11-18T18:47:51,364 DEBUG [RS:1;39fff3b0f89c:36311 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:51,364 DEBUG [RS:1;39fff3b0f89c:36311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:51,364 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,36311,1731955633450; all regions closed. 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:35551. 2024-11-18T18:47:51,365 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing abb47c13756aec252f5492952169050f, disabling compactions & flushes 2024-11-18T18:47:51,365 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 
2024-11-18T18:47:51,365 DEBUG [RS:0;39fff3b0f89c:35551 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:47:51,365 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:51,365 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,365 DEBUG [RS:0;39fff3b0f89c:35551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:51,365 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. after waiting 0 ms 2024-11-18T18:47:51,365 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,365 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:51,365 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:47:51,365 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,365 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing abb47c13756aec252f5492952169050f 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T18:47:51,365 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:47:51,366 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,366 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T18:47:51,366 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, abb47c13756aec252f5492952169050f=TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.} 2024-11-18T18:47:51,366 DEBUG [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, abb47c13756aec252f5492952169050f 2024-11-18T18:47:51,366 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:47:51,366 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:47:51,366 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:47:51,366 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:47:51,366 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,366 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:47:51,366 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-18T18:47:51,366 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,366 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 2024-11-18T18:47:51,366 ERROR [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5-prefix:39fff3b0f89c,35551,1731955632182.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,367 WARN [FSHLog-0-hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5-prefix:39fff3b0f89c,35551,1731955632182.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,367 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C35551%2C1731955632182.meta:.meta(num 1731955633205) roll requested 2024-11-18T18:47:51,367 WARN [IPC Server handler 4 on default port 44763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741837_1013 2024-11-18T18:47:51,367 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C35551%2C1731955632182.meta.1731955671367.meta 2024-11-18T18:47:51,367 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 after 1ms 2024-11-18T18:47:51,370 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,370 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK], DatanodeInfoWithStorage[127.0.0.1:46229,DS-412d2716-6e79-4d32-b104-cd57bff1b69d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 
2024-11-18T18:47:51,370 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741896_1081 2024-11-18T18:47:51,371 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:51,371 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/00bb2b7b6e4246a8b1d5e1addd1820fd is 1080, key is row0015/info:/1731955671085/Put/seqid=0 2024-11-18T18:47:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741898_1083 (size=14660) 2024-11-18T18:47:51,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741898_1083 (size=14660) 2024-11-18T18:47:51,377 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/00bb2b7b6e4246a8b1d5e1addd1820fd 2024-11-18T18:47:51,380 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,380 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,381 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955671367.meta 2024-11-18T18:47:51,382 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,382 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41919,DS-be88c8a9-50e9-470c-b3d3-3523e44434ed,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,382 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta 2024-11-18T18:47:51,382 WARN [IPC Server handler 3 on default port 44763 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741834_1010 2024-11-18T18:47:51,383 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta after 1ms 2024-11-18T18:47:51,386 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/.tmp/info/00bb2b7b6e4246a8b1d5e1addd1820fd as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/00bb2b7b6e4246a8b1d5e1addd1820fd 2024-11-18T18:47:51,393 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/00bb2b7b6e4246a8b1d5e1addd1820fd, entries=9, sequenceid=78, filesize=14.3 K 2024-11-18T18:47:51,394 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for abb47c13756aec252f5492952169050f in 29ms, sequenceid=78, compaction requested=true 2024-11-18T18:47:51,403 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40327:40327),(127.0.0.1/127.0.0.1:40245:40245)] 2024-11-18T18:47:51,404 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta is not closed yet, will try archiving it next time 2024-11-18T18:47:51,404 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e] to archive 2024-11-18T18:47:51,405 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T18:47:51,407 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6eaa20c2f51245ffb3e92b24a93b762c 2024-11-18T18:47:51,409 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/1b9a99d7650444aabbf943c1802c1e02 2024-11-18T18:47:51,411 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/ff840ea2eeff442bbfa2e85b63ddf795 2024-11-18T18:47:51,413 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/2c7f246abc164aa1a687e6ed4da8afb0 2024-11-18T18:47:51,414 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/29ee239dd590449b84f531ae9fa7ee5d 2024-11-18T18:47:51,415 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/info/6a399a3af82f4bd9ba74ce2c6cd5163e 2024-11-18T18:47:51,416 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39fff3b0f89c:38533 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-18T18:47:51,417 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6eaa20c2f51245ffb3e92b24a93b762c=10347, 1b9a99d7650444aabbf943c1802c1e02=12506, ff840ea2eeff442bbfa2e85b63ddf795=17994, 2c7f246abc164aa1a687e6ed4da8afb0=6027, 29ee239dd590449b84f531ae9fa7ee5d=6027, 6a399a3af82f4bd9ba74ce2c6cd5163e=6027] 2024-11-18T18:47:51,426 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/info/44055d5a6a7e4208b901922e2f1ba726 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f./info:regioninfo/1731955633954/Put/seqid=0 2024-11-18T18:47:51,428 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/default/TestLogRolling-testLogRollOnDatanodeDeath/abb47c13756aec252f5492952169050f/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-18T18:47:51,429 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 2024-11-18T18:47:51,429 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for abb47c13756aec252f5492952169050f: Waiting for close lock at 1731955671365Running coprocessor pre-close hooks at 1731955671365Disabling compacts and flushes for region at 1731955671365Disabling writes for close at 1731955671365Obtaining lock to block concurrent updates at 1731955671365Preparing flush snapshotting stores in abb47c13756aec252f5492952169050f at 1731955671365Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731955671366 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. at 1731955671367 (+1 ms)Flushing abb47c13756aec252f5492952169050f/info: creating writer at 1731955671367Flushing abb47c13756aec252f5492952169050f/info: appending metadata at 1731955671371 (+4 ms)Flushing abb47c13756aec252f5492952169050f/info: closing flushed file at 1731955671371Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56e52983: reopening flushed file at 1731955671385 (+14 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for abb47c13756aec252f5492952169050f in 29ms, sequenceid=78, compaction requested=true at 1731955671394 (+9 ms)Writing region close event to WAL at 1731955671419 (+25 ms)Running coprocessor post-close hooks at 1731955671429 (+10 ms)Closed at 1731955671429 2024-11-18T18:47:51,429 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731955633587.abb47c13756aec252f5492952169050f. 
2024-11-18T18:47:51,432 WARN [Thread-1055 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32877 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:47:51,432 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:43080 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4]'}, localName='127.0.0.1:41501', datanodeUuid='056ed252-36c4-4fce-a591-0395ffe8d5bd', xmitsInProgress=0}:Exception transferring block BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085 to mirror 127.0.0.1:32877 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:51,432 WARN [Thread-1055 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41501,DS-fe549449-2f68-4f8c-9db5-e6bbf35a73c5,DISK], DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK]) is bad. 
2024-11-18T18:47:51,432 WARN [Thread-1055 {}] hdfs.DataStreamer(1850): Abandoning BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085 2024-11-18T18:47:51,433 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:43080 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T18:47:51,433 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1762197410_22 at /127.0.0.1:43080 [Receiving block BP-1625265116-172.17.0.2-1731955630168:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:41501:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43080 dst: /127.0.0.1:41501 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:47:51,433 WARN [Thread-1055 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32877,DS-352928c3-29d3-46a8-9e52-55ea5eae6536,DISK] 2024-11-18T18:47:51,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741900_1086 (size=7089) 2024-11-18T18:47:51,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741900_1086 (size=7089) 2024-11-18T18:47:51,442 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/info/44055d5a6a7e4208b901922e2f1ba726 2024-11-18T18:47:51,465 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/ns/122d9fdf52124f6aa9d3f1b8e25d6f90 is 43, key is default/ns:d/1731955633305/Put/seqid=0 2024-11-18T18:47:51,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741901_1087 (size=5153) 2024-11-18T18:47:51,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741901_1087 (size=5153) 2024-11-18T18:47:51,472 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/ns/122d9fdf52124f6aa9d3f1b8e25d6f90 2024-11-18T18:47:51,481 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.1731955655228 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C35551%2C1731955632182.1731955655228 2024-11-18T18:47:51,498 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/table/17421288775648918a6d3ac7c0e149c7 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731955633967/Put/seqid=0 2024-11-18T18:47:51,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741902_1088 (size=5424) 2024-11-18T18:47:51,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741902_1088 (size=5424) 2024-11-18T18:47:51,505 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/table/17421288775648918a6d3ac7c0e149c7 2024-11-18T18:47:51,512 DEBUG 
[RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/info/44055d5a6a7e4208b901922e2f1ba726 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/info/44055d5a6a7e4208b901922e2f1ba726 2024-11-18T18:47:51,520 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/info/44055d5a6a7e4208b901922e2f1ba726, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T18:47:51,521 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/ns/122d9fdf52124f6aa9d3f1b8e25d6f90 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/ns/122d9fdf52124f6aa9d3f1b8e25d6f90 2024-11-18T18:47:51,529 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/ns/122d9fdf52124f6aa9d3f1b8e25d6f90, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T18:47:51,530 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/.tmp/table/17421288775648918a6d3ac7c0e149c7 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/table/17421288775648918a6d3ac7c0e149c7 2024-11-18T18:47:51,537 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/table/17421288775648918a6d3ac7c0e149c7, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T18:47:51,538 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false 2024-11-18T18:47:51,544 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T18:47:51,545 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:47:51,545 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:51,545 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955671366Running 
coprocessor pre-close hooks at 1731955671366Disabling compacts and flushes for region at 1731955671366Disabling writes for close at 1731955671366Obtaining lock to block concurrent updates at 1731955671366Preparing flush snapshotting stores in 1588230740 at 1731955671366Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731955671367 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731955671404 (+37 ms)Flushing 1588230740/info: creating writer at 1731955671404Flushing 1588230740/info: appending metadata at 1731955671426 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731955671426Flushing 1588230740/ns: creating writer at 1731955671449 (+23 ms)Flushing 1588230740/ns: appending metadata at 1731955671465 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731955671465Flushing 1588230740/table: creating writer at 1731955671479 (+14 ms)Flushing 1588230740/table: appending metadata at 1731955671497 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731955671498 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@543a283a: reopening flushed file at 1731955671511 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78324379: reopening flushed file at 1731955671520 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c9ae96d: reopening flushed file at 1731955671529 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false at 1731955671538 (+9 ms)Writing region close event to WAL at 1731955671540 (+2 ms)Running coprocessor post-close hooks at 1731955671545 (+5 ms)Closed at 1731955671545 2024-11-18T18:47:51,545 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:47:51,547 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:47:51,548 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:47:51,549 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:51,566 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,35551,1731955632182; all regions closed. 
2024-11-18T18:47:51,567 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,567 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,567 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,567 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,567 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:51,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741897_1082 (size=825) 2024-11-18T18:47:51,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741897_1082 (size=825) 2024-11-18T18:47:51,679 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:47:51,679 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:47:52,681 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:52,940 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4e8e1abd[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41501, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=40327, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741835_1011 to 127.0.0.1:32877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:52,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:47:53,323 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T18:47:53,324 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T18:47:53,939 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5e7fc8e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41501, datanodeUuid=056ed252-36c4-4fce-a591-0395ffe8d5bd, infoPort=40327, infoSecurePort=0, ipcPort=43759, storageInfo=lv=-57;cid=testClusterID;nsid=339842826;c=1731955630168):Failed to transfer BP-1625265116-172.17.0.2-1731955630168:blk_1073741829_1005 to 127.0.0.1:32877 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:53,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:47:55,369 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 after 4003ms 2024-11-18T18:47:55,384 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta after 4002ms 2024-11-18T18:47:55,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741877_1060 (size=12911) 2024-11-18T18:47:55,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:47:55,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:47:56,366 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T18:47:56,369 DEBUG [RS:1;39fff3b0f89c:36311 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C36311%2C1731955633450:(num 1731955633687) 2024-11-18T18:47:56,369 DEBUG [RS:1;39fff3b0f89c:36311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.CompactSplit(469): Waiting for Split 
Thread to finish... 2024-11-18T18:47:56,369 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:56,369 INFO [RS:1;39fff3b0f89c:36311 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36311 2024-11-18T18:47:56,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:56,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:56,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,36311,1731955633450 2024-11-18T18:47:56,412 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:56,413 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,36311,1731955633450] 2024-11-18T18:47:56,429 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,36311,1731955633450 already deleted, retry=false 2024-11-18T18:47:56,429 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,36311,1731955633450 expired; onlineServers=1 2024-11-18T18:47:56,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:56,521 INFO [RS:1;39fff3b0f89c:36311 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:56,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36311-0x101508f59690002, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:56,521 INFO [RS:1;39fff3b0f89c:36311 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,36311,1731955633450; zookeeper connection closed. 
2024-11-18T18:47:56,521 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66d5216d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66d5216d 2024-11-18T18:47:56,568 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T18:47:56,571 DEBUG [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs 2024-11-18T18:47:56,572 INFO [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C35551%2C1731955632182.meta:.meta(num 1731955671367) 2024-11-18T18:47:56,572 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,572 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,572 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,572 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,573 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741893_1077 (size=14682) 2024-11-18T18:47:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741893_1077 (size=14682) 2024-11-18T18:47:56,577 DEBUG [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs 2024-11-18T18:47:56,577 INFO [RS:0;39fff3b0f89c:35551 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C35551%2C1731955632182:(num 1731955671065) 2024-11-18T18:47:56,578 DEBUG [RS:0;39fff3b0f89c:35551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:47:56,578 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:47:56,578 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:56,578 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:56,578 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:56,578 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T18:47:56,578 INFO [RS:0;39fff3b0f89c:35551 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35551 2024-11-18T18:47:56,587 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:56,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:47:56,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,35551,1731955632182 2024-11-18T18:47:56,595 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,35551,1731955632182] 2024-11-18T18:47:56,604 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,35551,1731955632182 already deleted, retry=false 2024-11-18T18:47:56,604 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,35551,1731955632182 expired; onlineServers=0 2024-11-18T18:47:56,604 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,38533,1731955632018' ***** 2024-11-18T18:47:56,604 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:47:56,604 INFO [M:0;39fff3b0f89c:38533 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:47:56,604 INFO [M:0;39fff3b0f89c:38533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:47:56,605 DEBUG [M:0;39fff3b0f89c:38533 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:47:56,605 DEBUG [M:0;39fff3b0f89c:38533 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:47:56,605 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T18:47:56,605 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955632530 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955632530,5,FailOnTimeoutGroup] 2024-11-18T18:47:56,605 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955632530 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955632530,5,FailOnTimeoutGroup] 2024-11-18T18:47:56,605 INFO [M:0;39fff3b0f89c:38533 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:47:56,605 INFO [M:0;39fff3b0f89c:38533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:47:56,605 DEBUG [M:0;39fff3b0f89c:38533 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:47:56,605 INFO [M:0;39fff3b0f89c:38533 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:47:56,605 INFO [M:0;39fff3b0f89c:38533 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:47:56,606 INFO [M:0;39fff3b0f89c:38533 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:47:56,606 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:47:56,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:47:56,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:47:56,612 DEBUG [M:0;39fff3b0f89c:38533 {}] zookeeper.ZKUtil(347): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:47:56,612 WARN [M:0;39fff3b0f89c:38533 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:47:56,613 INFO [M:0;39fff3b0f89c:38533 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/.lastflushedseqids 2024-11-18T18:47:56,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741903_1089 (size=130) 2024-11-18T18:47:56,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741903_1089 (size=130) 2024-11-18T18:47:56,620 INFO [M:0;39fff3b0f89c:38533 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:47:56,620 INFO [M:0;39fff3b0f89c:38533 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:47:56,620 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:47:56,620 INFO [M:0;39fff3b0f89c:38533 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:56,620 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:56,620 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:47:56,620 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:47:56,620 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-18T18:47:56,640 DEBUG [M:0;39fff3b0f89c:38533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6d5894d2f9e4cb0819e63ac45210c89 is 82, key is hbase:meta,,1/info:regioninfo/1731955633255/Put/seqid=0 2024-11-18T18:47:56,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741904_1090 (size=5672) 2024-11-18T18:47:56,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741904_1090 (size=5672) 2024-11-18T18:47:56,645 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6d5894d2f9e4cb0819e63ac45210c89 2024-11-18T18:47:56,668 DEBUG [M:0;39fff3b0f89c:38533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b9d02c85be451bb3ef5ef6e82aa901 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731955633974/Put/seqid=0 2024-11-18T18:47:56,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741905_1091 (size=6256) 2024-11-18T18:47:56,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741905_1091 (size=6256) 2024-11-18T18:47:56,674 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b9d02c85be451bb3ef5ef6e82aa901 2024-11-18T18:47:56,680 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 12b9d02c85be451bb3ef5ef6e82aa901 2024-11-18T18:47:56,696 DEBUG [M:0;39fff3b0f89c:38533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7415b52cf4ef4025844101ab672fc10e is 69, key is 39fff3b0f89c,35551,1731955632182/rs:state/1731955632651/Put/seqid=0 2024-11-18T18:47:56,696 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:56,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35551-0x101508f59690001, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:56,696 INFO [RS:0;39fff3b0f89c:35551 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:56,696 INFO [RS:0;39fff3b0f89c:35551 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,35551,1731955632182; zookeeper connection closed. 2024-11-18T18:47:56,696 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@50beb114 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@50beb114 2024-11-18T18:47:56,696 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-18T18:47:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741906_1092 (size=5224) 2024-11-18T18:47:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741906_1092 (size=5224) 2024-11-18T18:47:56,701 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7415b52cf4ef4025844101ab672fc10e 2024-11-18T18:47:56,723 DEBUG [M:0;39fff3b0f89c:38533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7747f9173f6409c8c312feb81ac337c is 52, key is load_balancer_on/state:d/1731955633433/Put/seqid=0 2024-11-18T18:47:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741907_1093 (size=5056) 2024-11-18T18:47:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741907_1093 (size=5056) 2024-11-18T18:47:56,730 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7747f9173f6409c8c312feb81ac337c 2024-11-18T18:47:56,737 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6d5894d2f9e4cb0819e63ac45210c89 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a6d5894d2f9e4cb0819e63ac45210c89 2024-11-18T18:47:56,742 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a6d5894d2f9e4cb0819e63ac45210c89, entries=8, sequenceid=60, filesize=5.5 K 2024-11-18T18:47:56,743 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/12b9d02c85be451bb3ef5ef6e82aa901 as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12b9d02c85be451bb3ef5ef6e82aa901 2024-11-18T18:47:56,750 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 12b9d02c85be451bb3ef5ef6e82aa901 2024-11-18T18:47:56,750 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/12b9d02c85be451bb3ef5ef6e82aa901, entries=6, sequenceid=60, filesize=6.1 K 2024-11-18T18:47:56,751 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7415b52cf4ef4025844101ab672fc10e as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7415b52cf4ef4025844101ab672fc10e 2024-11-18T18:47:56,757 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7415b52cf4ef4025844101ab672fc10e, entries=2, sequenceid=60, filesize=5.1 K 2024-11-18T18:47:56,758 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7747f9173f6409c8c312feb81ac337c as hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7747f9173f6409c8c312feb81ac337c 2024-11-18T18:47:56,764 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7747f9173f6409c8c312feb81ac337c, entries=1, sequenceid=60, filesize=4.9 K 2024-11-18T18:47:56,765 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=60, compaction requested=false 2024-11-18T18:47:56,766 INFO [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:47:56,766 DEBUG [M:0;39fff3b0f89c:38533 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955676620Disabling compacts and flushes for region at 1731955676620Disabling writes for close at 1731955676620Obtaining lock to block concurrent updates at 1731955676620Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955676620Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731955676621 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955676622 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955676622Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955676639 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955676639Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955676651 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955676668 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955676668Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955676680 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955676695 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955676695Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955676707 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955676722 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955676722Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31171eb4: reopening flushed file at 1731955676736 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@442b90ad: reopening flushed file at 1731955676743 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@726c908e: reopening flushed file at 1731955676750 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69f9ace9: reopening flushed file at 1731955676757 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=60, compaction requested=false at 1731955676765 (+8 ms)Writing region close event to WAL at 1731955676766 (+1 ms)Closed at 1731955676766 2024-11-18T18:47:56,767 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,767 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,767 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:47:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41501 is added to blk_1073741891_1074 (size=1045) 2024-11-18T18:47:56,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741891_1074 (size=1045) 2024-11-18T18:47:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46229 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:47:56,965 WARN 
[HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:47:56,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,988 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:47:57,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:57,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:57,962 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@36b8b0c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41919,null,null]) java.net.ConnectException: Call From 39fff3b0f89c/172.17.0.2 to localhost:46443 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T18:47:58,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:58,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:58,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/WALs/39fff3b0f89c,38533,1731955632018/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/oldWALs/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 2024-11-18T18:47:58,565 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/MasterData/oldWALs/39fff3b0f89c%2C38533%2C1731955632018.1731955632334 to hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/oldWALs/39fff3b0f89c%2C38533%2C1731955632018.1731955632334$masterlocalwal$ 2024-11-18T18:47:58,565 INFO [M:0;39fff3b0f89c:38533 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T18:47:58,565 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:47:58,565 INFO [M:0;39fff3b0f89c:38533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38533 2024-11-18T18:47:58,565 INFO [M:0;39fff3b0f89c:38533 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:47:58,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:58,696 INFO [M:0;39fff3b0f89c:38533 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:47:58,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38533-0x101508f59690000, quorum=127.0.0.1:55514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:47:58,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d3b3ece{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:58,724 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c1d96{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:58,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:58,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27dc8a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:58,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@7f9cc2c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:58,726 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:58,726 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T18:47:58,726 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid 056ed252-36c4-4fce-a591-0395ffe8d5bd) service to localhost/127.0.0.1:44763 2024-11-18T18:47:58,726 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:58,726 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41919,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:46443 , LocalHost:localPort 39fff3b0f89c/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T18:47:58,727 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1625265116-172.17.0.2-1731955630168:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41501,null,null], DatanodeInfoWithStorage[127.0.0.1:41919,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1625265116-172.17.0.2-1731955630168 2024-11-18T18:47:58,727 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41501,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1625265116-172.17.0.2-1731955630168 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:58,727 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41919,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1625265116-172.17.0.2-1731955630168 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:47:58,727 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data3/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:58,727 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@762a3c73 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41501,null,null], DatanodeInfoWithStorage[127.0.0.1:41919,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1625265116-172.17.0.2-1731955630168:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41501,null,null], DatanodeInfoWithStorage[127.0.0.1:41919,null,null]] 2024-11-18T18:47:58,728 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data4/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:58,728 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:58,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f373428{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:58,731 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60ec384f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:58,731 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:58,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@411972d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:58,731 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45b83ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:58,733 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T18:47:58,733 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:47:58,733 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:47:58,733 WARN [BP-1625265116-172.17.0.2-1731955630168 heartbeating to localhost/127.0.0.1:44763 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1625265116-172.17.0.2-1731955630168 (Datanode Uuid a01454de-06ee-4862-bd8f-d790f28eb598) service to localhost/127.0.0.1:44763 2024-11-18T18:47:58,733 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data5/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:58,734 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/cluster_9031e034-b7f5-afb3-60eb-3579d819136c/data/data6/current/BP-1625265116-172.17.0.2-1731955630168 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:47:58,734 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:47:58,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58a4fc41{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:58,740 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e92d0c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:47:58,740 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:47:58,740 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37564f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:47:58,740 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@281d64b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir/,STOPPED} 2024-11-18T18:47:58,748 INFO [Time-limited test {}] 
zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:47:58,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:47:58,789 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44763 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44763 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f945cbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:44763 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f945cbf4ff8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 405) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=337 (was 442), ProcessCount=11 (was 11), AvailableMemoryMB=4158 (was 4658) 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=337, ProcessCount=11, AvailableMemoryMB=4158 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.log.dir so I do NOT create it in target/test-data/d61b808a-2184-acea-cbf7-e416e9952655 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/e58c3c04-112e-06e4-e443-80713fcf26eb/hadoop.tmp.dir so I do NOT create it in target/test-data/d61b808a-2184-acea-cbf7-e416e9952655 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758, deleteOnExit=true 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/test.cache.data in system properties and HBase conf 2024-11-18T18:47:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:47:58,797 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:47:58,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:47:58,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:47:58,809 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:59,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T18:47:59,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:47:59,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:47:59,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T18:47:59,113 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:59,119 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:59,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:59,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:59,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:47:59,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:59,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28b84096{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:59,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@721d81ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:59,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e2e6f70{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-36557-hadoop-hdfs-3_4_1-tests_jar-_-any-17667266484760375940/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:47:59,233 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13f6b910{HTTP/1.1, (http/1.1)}{localhost:36557} 2024-11-18T18:47:59,233 INFO [Time-limited test {}] server.Server(415): Started @156622ms 2024-11-18T18:47:59,245 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:47:59,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:59,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:47:59,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:59,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:59,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:59,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:59,463 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:59,463 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ab8d0f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:59,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@124f9706{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:59,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12021a0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-35423-hadoop-hdfs-3_4_1-tests_jar-_-any-720814047912929942/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:59,558 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347323d1{HTTP/1.1, (http/1.1)}{localhost:35423} 2024-11-18T18:47:59,559 INFO [Time-limited test {}] server.Server(415): Started @156948ms 2024-11-18T18:47:59,560 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:47:59,586 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:47:59,590 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:47:59,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:47:59,591 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:47:59,591 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:47:59,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15befb81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:47:59,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ad5d3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:47:59,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f8bd790{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-32893-hadoop-hdfs-3_4_1-tests_jar-_-any-11854550871606561819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:47:59,690 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5968e343{HTTP/1.1, (http/1.1)}{localhost:32893} 2024-11-18T18:47:59,690 INFO [Time-limited test {}] server.Server(415): Started @157079ms 2024-11-18T18:47:59,691 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:00,304 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data1/current/BP-1843742824-172.17.0.2-1731955678813/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:00,304 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data2/current/BP-1843742824-172.17.0.2-1731955678813/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:00,323 WARN [Thread-1171 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:48:00,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41af9cf9c4881ae with lease ID 0xe496cdd5796c8b52: Processing first storage report for DS-84706612-96b9-4d56-b728-74db80946132 from datanode DatanodeRegistration(127.0.0.1:36505, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=37381, infoSecurePort=0, ipcPort=32929, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813) 2024-11-18T18:48:00,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41af9cf9c4881ae with lease ID 0xe496cdd5796c8b52: from storage DS-84706612-96b9-4d56-b728-74db80946132 node DatanodeRegistration(127.0.0.1:36505, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=37381, infoSecurePort=0, ipcPort=32929, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:00,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41af9cf9c4881ae with lease ID 0xe496cdd5796c8b52: Processing first storage report for DS-272b0e75-82c9-4cdc-bea2-b6a94e82f201 from datanode DatanodeRegistration(127.0.0.1:36505, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=37381, infoSecurePort=0, ipcPort=32929, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813) 2024-11-18T18:48:00,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41af9cf9c4881ae with lease ID 0xe496cdd5796c8b52: from storage DS-272b0e75-82c9-4cdc-bea2-b6a94e82f201 node DatanodeRegistration(127.0.0.1:36505, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=37381, infoSecurePort=0, ipcPort=32929, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:00,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:00,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:00,502 WARN [Thread-1218 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data3/current/BP-1843742824-172.17.0.2-1731955678813/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:00,502 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data4/current/BP-1843742824-172.17.0.2-1731955678813/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:00,516 WARN [Thread-1194 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x566a85169f8994f3 with lease ID 0xe496cdd5796c8b53: Processing first storage report for DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7 from datanode DatanodeRegistration(127.0.0.1:41403, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=45459, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813) 2024-11-18T18:48:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x566a85169f8994f3 with lease ID 0xe496cdd5796c8b53: from storage DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7 node DatanodeRegistration(127.0.0.1:41403, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=45459, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x566a85169f8994f3 with lease ID 0xe496cdd5796c8b53: Processing first storage report for DS-c0cfb89c-7dfc-4d1d-9836-5620b552decb from datanode DatanodeRegistration(127.0.0.1:41403, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=45459, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813) 2024-11-18T18:48:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x566a85169f8994f3 with lease ID 0xe496cdd5796c8b53: from storage DS-c0cfb89c-7dfc-4d1d-9836-5620b552decb node DatanodeRegistration(127.0.0.1:41403, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=45459, infoSecurePort=0, ipcPort=45283, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:00,619 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655 2024-11-18T18:48:00,622 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/zookeeper_0, clientPort=52973, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:48:00,622 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52973 2024-11-18T18:48:00,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,624 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:48:00,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:48:00,634 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff with version=8 2024-11-18T18:48:00,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:48:00,638 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:48:00,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:48:00,639 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:48:00,640 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42629 2024-11-18T18:48:00,641 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42629 connecting to ZooKeeper ensemble=127.0.0.1:52973 2024-11-18T18:48:00,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:426290x0, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:48:00,686 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42629-0x101509017550000 connected 2024-11-18T18:48:00,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:00,755 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff, hbase.cluster.distributed=false 2024-11-18T18:48:00,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:48:00,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42629 2024-11-18T18:48:00,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42629 2024-11-18T18:48:00,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42629 2024-11-18T18:48:00,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42629 2024-11-18T18:48:00,759 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42629 2024-11-18T18:48:00,775 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:48:00,775 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:48:00,776 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:48:00,776 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46215 2024-11-18T18:48:00,778 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46215 connecting to ZooKeeper ensemble=127.0.0.1:52973 2024-11-18T18:48:00,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,780 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462150x0, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:48:00,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:00,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46215-0x101509017550001 connected 2024-11-18T18:48:00,791 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:48:00,791 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:48:00,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:48:00,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:48:00,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46215 2024-11-18T18:48:00,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46215 2024-11-18T18:48:00,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46215 2024-11-18T18:48:00,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46215 2024-11-18T18:48:00,795 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46215 
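Note: the RecoverableZooKeeper(137) and ZKUtil(113) records above show each process (master:42629, regionserver:46215) connecting to the mini ZooKeeper ensemble at 127.0.0.1:52973 and registering watches on znodes such as /hbase/running, /hbase/master and /hbase/acl before those nodes exist. A minimal sketch of the same watch registration with the plain ZooKeeper client API (ensemble address and paths are taken from the log; the class and flow are illustrative only, not how HBase's ZKWatcher is implemented):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchHBaseZNodes {
        public static void main(String[] args) throws Exception {
            // Ensemble as reported by RecoverableZooKeeper above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:52973", 30000,
                    (WatchedEvent e) -> System.out.println("event: " + e));
            // exists() registers a watch even when the znode is absent, which is
            // what the "Set watcher on znode that does not yet exist" lines describe.
            zk.exists("/hbase/running", true);
            zk.exists("/hbase/master", true);
            zk.exists("/hbase/acl", true);
            Thread.sleep(5_000);   // give the mini cluster time to create the nodes
            zk.close();
        }
    }

HBase wraps this in RecoverableZooKeeper/ZKWatcher, which add the retry and connection-loss handling this sketch omits.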
2024-11-18T18:48:00,809 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:42629 2024-11-18T18:48:00,809 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:00,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:00,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:00,815 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:48:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,824 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:48:00,824 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,42629,1731955680638 from backup master directory 2024-11-18T18:48:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:00,831 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
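Note: the ZKWatcher(609) records above capture the events both sessions receive while the master registers itself: NodeChildrenChanged under /hbase/backup-masters, NodeCreated for /hbase/master, and NodeDeleted when ActiveMasterManager removes the backup-master znode. A minimal sketch of a watcher that dispatches on those same event types (illustrative only; HBase's ZKWatcher routes these events to registered listeners rather than printing them):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    public class MasterZNodeEventLogger implements Watcher {
        @Override
        public void process(WatchedEvent event) {
            switch (event.getType()) {
                case NodeCreated:
                    System.out.println("created: " + event.getPath());          // e.g. /hbase/master
                    break;
                case NodeDeleted:
                    System.out.println("deleted: " + event.getPath());          // e.g. a backup-masters child
                    break;
                case NodeChildrenChanged:
                    System.out.println("children changed: " + event.getPath()); // e.g. /hbase/backup-masters
                    break;
                default:
                    // type=None carries connection-state changes such as SyncConnected
                    break;
            }
        }
    }

ZooKeeper watches are one-shot, which is why watch registration keeps reappearing in the ZKUtil(111)/ZKUtil(113) lines throughout this log.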
2024-11-18T18:48:00,831 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:00,836 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/hbase.id] with ID: 4410532a-9a08-4def-bf66-d3bd603795fa 2024-11-18T18:48:00,837 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/.tmp/hbase.id 2024-11-18T18:48:00,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:48:00,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:48:00,843 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/.tmp/hbase.id]:[hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/hbase.id] 2024-11-18T18:48:00,854 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:00,854 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T18:48:00,855 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
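Note: the FSUtils(620)/FSUtils(625)/FSUtils(634) records above show the cluster ID being written to a temporary file (.tmp/hbase.id) and then moved onto its final name. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API (paths mirror the log; the real FSUtils writes a protobuf-serialized ClusterId, not a bare UUID string):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
        public static void writeClusterId(Configuration conf, Path rootDir) throws IOException {
            FileSystem fs = rootDir.getFileSystem(conf);
            Path tmp = new Path(rootDir, ".tmp/hbase.id");
            Path target = new Path(rootDir, "hbase.id");
            String clusterId = UUID.randomUUID().toString();
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(clusterId.getBytes(StandardCharsets.UTF_8));
            }
            // Publish by renaming the temporary file onto the final name, so readers
            // never observe a partially written hbase.id.
            if (!fs.rename(tmp, target)) {
                throw new IOException("rename failed: " + tmp + " -> " + target);
            }
        }
    }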
2024-11-18T18:48:00,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:48:00,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:48:00,872 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:48:00,873 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:48:00,873 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:00,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:48:00,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:48:00,881 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store 2024-11-18T18:48:00,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:48:00,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:48:00,888 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:00,888 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
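Note: the MasterRegion(370) and HRegion(7590) records above print the full descriptor of the local 'master:store' table: an in-memory 'info' family with 3 versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks, plus 'proc', 'rs' and 'state' families whose logged settings match the client defaults. A sketch of how an equivalent descriptor could be assembled with the public client API (this mirrors the values printed in the log, not MasterRegion's internal construction):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            // 'info' family as logged: 3 versions, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB blocks.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8 * 1024)
                    .build())
                // 'proc', 'rs' and 'state' use the logged values of 1 version,
                // ROW bloom filter, 64 KB blocks and no encoding, i.e. the defaults.
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
                .build();
        }
    }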
2024-11-18T18:48:00,888 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955680888Disabling compacts and flushes for region at 1731955680888Disabling writes for close at 1731955680888Writing region close event to WAL at 1731955680888Closed at 1731955680888 2024-11-18T18:48:00,889 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/.initializing 2024-11-18T18:48:00,889 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:00,892 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C42629%2C1731955680638, suffix=, logDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638, archiveDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/oldWALs, maxLogs=10 2024-11-18T18:48:00,893 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C42629%2C1731955680638.1731955680893 2024-11-18T18:48:00,898 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 2024-11-18T18:48:00,898 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37381:37381),(127.0.0.1/127.0.0.1:45459:45459)] 2024-11-18T18:48:00,899 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:48:00,899 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:00,899 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,899 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:48:00,904 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:00,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:00,904 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:48:00,906 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:00,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:00,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:48:00,908 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:00,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:00,908 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:48:00,910 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:00,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:00,911 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,911 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,912 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:48:00,914 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:00,917 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:48:00,917 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812885, jitterRate=0.033637985587120056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:48:00,918 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955680899Initializing all the Stores at 1731955680900 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955680900Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955680902 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955680902Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955680902Cleaning up temporary data from old regions at 1731955680913 (+11 ms)Region opened successfully at 1731955680918 (+5 ms) 2024-11-18T18:48:00,918 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:48:00,921 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@470f043d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:48:00,922 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:48:00,922 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:48:00,922 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:48:00,922 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:48:00,923 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:48:00,923 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:48:00,923 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:48:00,925 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:48:00,926 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:48:00,931 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:48:00,932 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:48:00,933 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:48:00,940 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:48:00,940 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:48:00,941 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:48:00,948 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:48:00,949 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:48:00,956 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:48:00,958 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:48:00,965 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:48:00,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:00,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:00,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,974 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,42629,1731955680638, sessionid=0x101509017550000, setting cluster-up flag (Was=false) 2024-11-18T18:48:00,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:00,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:01,015 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:48:01,016 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:01,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:01,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:01,056 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:48:01,058 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:01,059 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:48:01,061 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:01,061 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:48:01,061 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T18:48:01,061 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,42629,1731955680638 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:48:01,063 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T18:48:01,064 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955711064 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:48:01,065 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:48:01,065 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:01,066 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:48:01,066 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955681066,5,FailOnTimeoutGroup] 2024-11-18T18:48:01,066 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955681066,5,FailOnTimeoutGroup] 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,066 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,067 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,067 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:48:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:48:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:48:01,077 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:48:01,077 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff 2024-11-18T18:48:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:48:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:48:01,097 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(746): ClusterId : 4410532a-9a08-4def-bf66-d3bd603795fa 2024-11-18T18:48:01,097 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:48:01,107 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:48:01,107 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:48:01,115 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:48:01,116 DEBUG [RS:0;39fff3b0f89c:46215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1159de1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:48:01,127 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:46215 2024-11-18T18:48:01,127 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:48:01,127 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:48:01,128 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(832): About to register with Master. 
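
The hbase:meta table descriptor logged above (first by FSTableDescriptors, then again when HRegion creates region 1588230740) uses ROWCOL bloom filters, in-memory column families, ROW_INDEX_V1 block encoding, 8 KB blocks and three versions. hbase:meta itself is created internally by the master, but the same attributes can be expressed through the public client API; a minimal, hypothetical sketch (table and family names are illustrative, not taken from this run):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor build() {
        // Column family mirroring the attributes shown in the log entry:
        // ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .setMaxVersions(3)
            .build();
        // "example" is a hypothetical user table; hbase:meta is never created this way by applications.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
      }
    }
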
2024-11-18T18:48:01,128 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,42629,1731955680638 with port=46215, startcode=1731955680775 2024-11-18T18:48:01,128 DEBUG [RS:0;39fff3b0f89c:46215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:48:01,130 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46941, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:48:01,130 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42629 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,130 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42629 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,132 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff 2024-11-18T18:48:01,132 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39915 2024-11-18T18:48:01,132 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:48:01,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:48:01,140 DEBUG [RS:0;39fff3b0f89c:46215 {}] zookeeper.ZKUtil(111): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,140 WARN [RS:0;39fff3b0f89c:46215 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:48:01,140 INFO [RS:0;39fff3b0f89c:46215 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:01,140 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,141 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,46215,1731955680775] 2024-11-18T18:48:01,144 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:48:01,145 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:48:01,145 INFO [RS:0;39fff3b0f89c:46215 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:48:01,145 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
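
The MemStoreFlusher entry below reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M; the lower mark is 95% of the limit (880 x 0.95 = 836), consistent with the conventional defaults for hbase.regionserver.global.memstore.size (fraction of heap) and hbase.regionserver.global.memstore.size.lower.limit. A rough sketch of that arithmetic; the property names and default fractions are assumptions, not values read from this run's configuration:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        // Assumed defaults: hbase.regionserver.global.memstore.size = 0.4 of heap,
        // hbase.regionserver.global.memstore.size.lower.limit = 0.95 of the limit.
        long maxHeapBytes = Runtime.getRuntime().maxMemory();
        double globalFraction = 0.4;
        double lowerFraction = 0.95;
        long globalLimit = (long) (maxHeapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerFraction);  // in this log: 880 MB * 0.95 = 836 MB
        System.out.printf("globalMemStoreLimit=%d lowMark=%d%n", globalLimit, lowMark);
      }
    }
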
2024-11-18T18:48:01,145 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:48:01,146 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:48:01,146 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,146 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,146 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:48:01,147 DEBUG [RS:0;39fff3b0f89c:46215 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
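
Several configuration keys are named verbatim in this startup sequence: hbase.normalizer.merge.min_region_size.mb (updated from 0 to 1 above), hbase.regions.recovery.store.file.ref.count (reported as disabled unless set > 0), and hbase.hregion.percolumnfamilyflush.size.lower.bound (reported as unset by FlushLargeStoresPolicy further below). They can be set programmatically before starting a cluster or test; a minimal sketch using the Hadoop Configuration API, with purely illustrative values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // The keys below appear verbatim in the log; the values are illustrative only.
        conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
        return conf;
      }
    }
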
2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,148 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,46215,1731955680775-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:48:01,164 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:48:01,164 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,46215,1731955680775-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,164 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,164 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.Replication(171): 39fff3b0f89c,46215,1731955680775 started 2024-11-18T18:48:01,177 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,177 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,46215,1731955680775, RpcServer on 39fff3b0f89c/172.17.0.2:46215, sessionid=0x101509017550001 2024-11-18T18:48:01,177 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:48:01,177 DEBUG [RS:0;39fff3b0f89c:46215 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,177 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,46215,1731955680775' 2024-11-18T18:48:01,177 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,46215,1731955680775' 2024-11-18T18:48:01,178 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:48:01,178 DEBUG 
[RS:0;39fff3b0f89c:46215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:48:01,179 DEBUG [RS:0;39fff3b0f89c:46215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:48:01,179 INFO [RS:0;39fff3b0f89c:46215 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:48:01,179 INFO [RS:0;39fff3b0f89c:46215 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:48:01,282 INFO [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C46215%2C1731955680775, suffix=, logDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775, archiveDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs, maxLogs=32 2024-11-18T18:48:01,284 INFO [RS:0;39fff3b0f89c:46215 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:01,291 INFO [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:01,292 DEBUG [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45459:45459),(127.0.0.1/127.0.0.1:37381:37381)] 2024-11-18T18:48:01,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:01,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:01,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:01,486 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:48:01,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:48:01,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,488 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:48:01,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:48:01,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:48:01,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:48:01,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:48:01,494 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:48:01,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:48:01,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740 2024-11-18T18:48:01,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740 2024-11-18T18:48:01,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-11-18T18:48:01,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:48:01,497 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:48:01,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:48:01,501 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:48:01,501 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690741, jitterRate=-0.12167803943157196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955681485Initializing all the Stores at 1731955681486 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681486Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681486Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955681486Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681486Cleaning up temporary data from old regions at 1731955681497 (+11 ms)Region opened successfully at 1731955681501 (+4 ms) 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:48:01,502 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-11-18T18:48:01,502 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:48:01,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955681502Disabling compacts and flushes for region at 1731955681502Disabling writes for close at 1731955681502Writing region close event to WAL at 1731955681502Closed at 1731955681502 2024-11-18T18:48:01,504 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:01,504 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:48:01,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:48:01,505 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:48:01,506 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:48:01,657 DEBUG [39fff3b0f89c:42629 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:48:01,657 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,660 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,46215,1731955680775, state=OPENING 2024-11-18T18:48:01,699 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:48:01,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:01,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:01,708 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:48:01,708 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:01,708 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:01,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1588230740, server=39fff3b0f89c,46215,1731955680775}] 2024-11-18T18:48:01,864 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:48:01,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:48:01,873 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:48:01,874 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:01,876 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C46215%2C1731955680775.meta, suffix=.meta, logDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775, archiveDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs, maxLogs=32 2024-11-18T18:48:01,877 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta 2024-11-18T18:48:01,884 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta 2024-11-18T18:48:01,885 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37381:37381),(127.0.0.1/127.0.0.1:45459:45459)] 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:48:01,886 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:48:01,886 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:48:01,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:48:01,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:48:01,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:48:01,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:48:01,889 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:48:01,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:48:01,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:01,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:48:01,892 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:48:01,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:01,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
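
The CompactionConfiguration entries above report minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 and an off-peak ratio of 5.0. The ratio governs minor-compaction file selection: roughly, a store file is a candidate only if its size is no larger than ratio times the combined size of the other files under consideration. A simplified, hypothetical sketch of that rule (an assumed reading of the documented ratio-based policy, not the actual ExploringCompactionPolicy code):

    import java.util.List;

    public class RatioRuleSketch {
      // Simplified test: file i qualifies if size(i) <= ratio * sum(sizes of the other candidates).
      static boolean qualifies(List<Long> sizes, int i, double ratio) {
        long others = 0;
        for (int j = 0; j < sizes.size(); j++) {
          if (j != i) {
            others += sizes.get(j);
          }
        }
        return sizes.get(i) <= ratio * others;
      }

      public static void main(String[] args) {
        List<Long> sizes = List.of(100L, 30L, 25L, 20L);  // illustrative file sizes
        System.out.println(qualifies(sizes, 0, 1.2));     // 100 <= 1.2 * 75  -> false
        System.out.println(qualifies(sizes, 1, 1.2));     // 30  <= 1.2 * 145 -> true
      }
    }

With the off-peak ratio of 5.0, the same test admits much larger files during configured off-peak hours, which is why the two ratios appear side by side in the logged configuration.
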
2024-11-18T18:48:01,893 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:48:01,893 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740 2024-11-18T18:48:01,894 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740 2024-11-18T18:48:01,896 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:48:01,896 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:48:01,896 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:48:01,898 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:48:01,898 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822325, jitterRate=0.04564148187637329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:48:01,898 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:48:01,899 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955681886Writing region info on filesystem at 1731955681886Initializing all the Stores at 1731955681887 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681887Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681887Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955681887Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955681887Cleaning up temporary data from old regions at 1731955681896 (+9 ms)Running coprocessor post-open hooks at 1731955681898 (+2 ms)Region opened successfully at 1731955681899 (+1 ms) 2024-11-18T18:48:01,900 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955681863 2024-11-18T18:48:01,902 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:48:01,902 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:48:01,903 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,905 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,46215,1731955680775, state=OPEN 2024-11-18T18:48:01,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:48:01,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:48:01,942 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:01,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:01,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:01,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:48:01,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,46215,1731955680775 in 234 msec 2024-11-18T18:48:01,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:48:01,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 443 msec 2024-11-18T18:48:01,952 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:01,952 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:48:01,954 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:48:01,954 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,46215,1731955680775, seqNum=-1] 2024-11-18T18:48:01,954 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:48:01,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59893, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:48:01,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 900 msec 2024-11-18T18:48:01,962 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955681962, completionTime=-1 2024-11-18T18:48:01,962 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:48:01,962 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:48:01,963 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:48:01,963 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955741963 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955801964 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:42629, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,964 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:01,966 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.136sec 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:48:01,968 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:48:01,971 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:48:01,971 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:48:01,971 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,42629,1731955680638-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:01,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b6fee43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:01,998 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,42629,-1 for getting cluster id 2024-11-18T18:48:01,998 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:48:01,999 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4410532a-9a08-4def-bf66-d3bd603795fa' 2024-11-18T18:48:02,000 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:48:02,000 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4410532a-9a08-4def-bf66-d3bd603795fa" 2024-11-18T18:48:02,000 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76b0cb2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:02,000 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,42629,-1] 2024-11-18T18:48:02,000 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:48:02,001 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:02,002 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:48:02,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66618ea8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:02,003 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:48:02,004 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,46215,1731955680775, seqNum=-1] 2024-11-18T18:48:02,005 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:48:02,006 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50310, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:48:02,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:02,008 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:02,011 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:48:02,011 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-18T18:48:02,011 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-18T18:48:02,011 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T18:48:02,012 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:02,012 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@46fbb398 2024-11-18T18:48:02,012 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T18:48:02,014 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T18:48:02,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T18:48:02,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T18:48:02,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:48:02,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T18:48:02,018 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T18:48:02,018 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:02,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-18T18:48:02,019 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T18:48:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:48:02,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741835_1011 (size=395) 2024-11-18T18:48:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741835_1011 (size=395) 2024-11-18T18:48:02,029 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f1790a755abd4dc512f3c6df9e1a12af, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff 2024-11-18T18:48:02,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41403 is added to blk_1073741836_1012 (size=78) 2024-11-18T18:48:02,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36505 is added to blk_1073741836_1012 (size=78) 2024-11-18T18:48:02,038 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:02,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing f1790a755abd4dc512f3c6df9e1a12af, disabling compactions & flushes 2024-11-18T18:48:02,038 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:02,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:02,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. after waiting 0 ms 2024-11-18T18:48:02,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:02,038 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:02,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for f1790a755abd4dc512f3c6df9e1a12af: Waiting for close lock at 1731955682038Disabling compacts and flushes for region at 1731955682038Disabling writes for close at 1731955682038Writing region close event to WAL at 1731955682038Closed at 1731955682038 2024-11-18T18:48:02,040 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T18:48:02,040 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731955682040"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955682040"}]},"ts":"1731955682040"} 2024-11-18T18:48:02,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T18:48:02,045 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T18:48:02,045 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955682045"}]},"ts":"1731955682045"} 2024-11-18T18:48:02,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-18T18:48:02,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f1790a755abd4dc512f3c6df9e1a12af, ASSIGN}] 2024-11-18T18:48:02,049 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f1790a755abd4dc512f3c6df9e1a12af, ASSIGN 2024-11-18T18:48:02,050 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f1790a755abd4dc512f3c6df9e1a12af, ASSIGN; state=OFFLINE, location=39fff3b0f89c,46215,1731955680775; forceNewPlan=false, retain=false 2024-11-18T18:48:02,201 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f1790a755abd4dc512f3c6df9e1a12af, regionState=OPENING, regionLocation=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:02,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f1790a755abd4dc512f3c6df9e1a12af, ASSIGN because future has completed 2024-11-18T18:48:02,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1790a755abd4dc512f3c6df9e1a12af, server=39fff3b0f89c,46215,1731955680775}] 2024-11-18T18:48:02,370 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 
2024-11-18T18:48:02,370 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f1790a755abd4dc512f3c6df9e1a12af, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:48:02,371 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,371 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:02,371 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,371 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,373 INFO [StoreOpener-f1790a755abd4dc512f3c6df9e1a12af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,375 INFO [StoreOpener-f1790a755abd4dc512f3c6df9e1a12af-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f1790a755abd4dc512f3c6df9e1a12af columnFamilyName info 2024-11-18T18:48:02,375 DEBUG [StoreOpener-f1790a755abd4dc512f3c6df9e1a12af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:02,376 INFO [StoreOpener-f1790a755abd4dc512f3c6df9e1a12af-1 {}] regionserver.HStore(327): Store=f1790a755abd4dc512f3c6df9e1a12af/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:02,376 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,377 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,377 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,378 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,378 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:02,381 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,384 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:48:02,384 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f1790a755abd4dc512f3c6df9e1a12af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824397, jitterRate=0.048276081681251526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:48:02,384 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:02,385 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f1790a755abd4dc512f3c6df9e1a12af: Running coprocessor pre-open hook at 1731955682371Writing region info on filesystem at 1731955682371Initializing all the Stores at 1731955682373 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955682373Cleaning up temporary data from old regions at 1731955682378 (+5 ms)Running coprocessor post-open hooks at 1731955682384 (+6 ms)Region opened successfully at 1731955682385 (+1 ms) 2024-11-18T18:48:02,386 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af., pid=6, masterSystemTime=1731955682363 2024-11-18T18:48:02,388 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:02,388 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 
2024-11-18T18:48:02,389 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f1790a755abd4dc512f3c6df9e1a12af, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:02,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:02,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f1790a755abd4dc512f3c6df9e1a12af, server=39fff3b0f89c,46215,1731955680775 because future has completed 2024-11-18T18:48:02,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T18:48:02,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f1790a755abd4dc512f3c6df9e1a12af, server=39fff3b0f89c,46215,1731955680775 in 186 msec 2024-11-18T18:48:02,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T18:48:02,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=f1790a755abd4dc512f3c6df9e1a12af, ASSIGN in 349 msec 2024-11-18T18:48:02,403 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T18:48:02,404 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955682404"}]},"ts":"1731955682404"} 2024-11-18T18:48:02,406 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-18T18:48:02,407 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T18:48:02,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 392 msec 2024-11-18T18:48:03,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:03,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:04,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:04,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:04,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:48:04,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:04,568 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:05,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:05,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:06,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:06,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:07,144 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T18:48:07,144 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-18T18:48:07,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:07,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:08,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:08,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:09,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:48:09,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T18:48:09,036 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T18:48:09,036 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-18T18:48:09,038 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:48:09,038 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T18:48:09,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T18:48:09,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T18:48:09,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:09,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:10,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:10,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:11,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:11,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42629 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:48:12,089 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-18T18:48:12,089 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-18T18:48:12,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T18:48:12,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:12,096 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af., hostname=39fff3b0f89c,46215,1731955680775, seqNum=2] 2024-11-18T18:48:12,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:12,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:13,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:13,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:14,099 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:14,100 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:14,100 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:14,100 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:14,101 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK], DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]) is bad. 2024-11-18T18:48:14,101 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK], DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]) is bad. 2024-11-18T18:48:14,101 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK], DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41403,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]) is bad. 2024-11-18T18:48:14,101 WARN [PacketResponder: BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41403] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,101 WARN [PacketResponder: BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41403] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:48854 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48854 dst: /127.0.0.1:41403 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:44202 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44202 dst: /127.0.0.1:36505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:48:14,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1505339734_22 at /127.0.0.1:44164 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44164 dst: /127.0.0.1:36505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:44212 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44212 dst: /127.0.0.1:36505 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:48:14,101 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:48870 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48870 dst: /127.0.0.1:41403 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,102 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1505339734_22 at /127.0.0.1:48838 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41403:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48838 dst: /127.0.0.1:41403 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f8bd790{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:14,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5968e343{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:14,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:14,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ad5d3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:14,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15befb81{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:14,157 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:14,157 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:48:14,157 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid abc66573-6f57-4e0c-9d18-6c9c8ea3650f) service to localhost/127.0.0.1:39915 2024-11-18T18:48:14,157 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:14,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data3/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:14,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data4/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:14,158 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:14,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:14,169 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:14,170 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:14,170 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:14,170 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:48:14,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d8c7947{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:14,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44ef9e03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:14,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@797b72a2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-40897-hadoop-hdfs-3_4_1-tests_jar-_-any-1113631590754228688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:14,263 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24cc63e3{HTTP/1.1, 
(http/1.1)}{localhost:40897} 2024-11-18T18:48:14,263 INFO [Time-limited test {}] server.Server(415): Started @171652ms 2024-11-18T18:48:14,264 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:14,281 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:14,281 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:14,281 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:14,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:40838 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40838 dst: /127.0.0.1:36505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1505339734_22 at /127.0.0.1:40830 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40830 dst: /127.0.0.1:36505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:14,281 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:40836 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36505:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40836 dst: /127.0.0.1:36505 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:48:14,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12021a0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:14,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347323d1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:14,289 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:14,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@124f9706{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:14,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ab8d0f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:14,290 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:14,290 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T18:48:14,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:14,290 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid c2ebdf43-6772-4926-abea-cada2d72e8dc) service to localhost/127.0.0.1:39915 2024-11-18T18:48:14,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data1/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:14,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data2/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:14,291 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:14,298 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:14,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:14,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:14,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:14,303 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:48:14,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f95128b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:14,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a57d797{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:14,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-18T18:48:14,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e7f9298{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-45559-hadoop-hdfs-3_4_1-tests_jar-_-any-5626278135921866687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:14,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:14,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6baac986{HTTP/1.1, (http/1.1)}{localhost:45559} 2024-11-18T18:48:14,398 INFO [Time-limited test {}] server.Server(415): Started @171787ms 2024-11-18T18:48:14,399 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:14,713 WARN [Thread-1342 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T18:48:14,716 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b74da5aa68e3ea5 with lease ID 0xe496cdd5796c8b54: from storage DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7 node DatanodeRegistration(127.0.0.1:35681, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=37573, infoSecurePort=0, ipcPort=40249, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T18:48:14,717 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b74da5aa68e3ea5 with lease ID 0xe496cdd5796c8b54: from storage DS-c0cfb89c-7dfc-4d1d-9836-5620b552decb node DatanodeRegistration(127.0.0.1:35681, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=37573, infoSecurePort=0, ipcPort=40249, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:14,865 WARN [Thread-1362 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:14,868 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19c161ccc3169c50 with lease ID 0xe496cdd5796c8b55: from storage DS-84706612-96b9-4d56-b728-74db80946132 node DatanodeRegistration(127.0.0.1:34093, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=44375, infoSecurePort=0, ipcPort=33625, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:14,868 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x19c161ccc3169c50 with lease ID 0xe496cdd5796c8b55: from storage DS-272b0e75-82c9-4cdc-bea2-b6a94e82f201 node DatanodeRegistration(127.0.0.1:34093, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=44375, infoSecurePort=0, ipcPort=33625, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:15,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:15,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:15,414 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-18T18:48:15,417 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-18T18:48:15,418 ERROR [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:15,418 WARN [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:15,418 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C46215%2C1731955680775:(num 1731955681284) roll requested 2024-11-18T18:48:15,419 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:15,425 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 newFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:15,425 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:15,425 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:15,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:15,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:15,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:15,426 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:15,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:15,428 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:15,428 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:15,428 WARN [IPC Server handler 0 on default port 39915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-18T18:48:15,429 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 after 0ms 2024-11-18T18:48:15,432 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44375:44375),(127.0.0.1/127.0.0.1:37573:37573)] 2024-11-18T18:48:15,432 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 is not closed yet, will try archiving it next time 2024-11-18T18:48:16,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:16,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:17,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:17,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:17,436 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-18T18:48:17,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T18:48:18,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:18,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:19,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:19,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:19,429 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 after 4001ms 2024-11-18T18:48:19,440 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:19,441 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34093,DS-84706612-96b9-4d56-b728-74db80946132,DISK], DatanodeInfoWithStorage[127.0.0.1:35681,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34093,DS-84706612-96b9-4d56-b728-74db80946132,DISK]) is bad. 2024-11-18T18:48:19,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:39144 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39144 dst: /127.0.0.1:35681 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:19,441 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:43212 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:34093:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43212 dst: /127.0.0.1:34093 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:48:19,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e7f9298{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:19,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6baac986{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:19,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:19,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a57d797{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:19,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f95128b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:19,496 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:19,496 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid c2ebdf43-6772-4926-abea-cada2d72e8dc) service to localhost/127.0.0.1:39915 2024-11-18T18:48:19,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data1/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:19,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data2/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:19,498 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T18:48:19,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:19,498 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:19,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:19,526 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:19,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:19,528 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:19,528 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:48:19,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55678be6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:19,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ebe8434{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:19,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@119a914{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-34497-hadoop-hdfs-3_4_1-tests_jar-_-any-12911042147333275657/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:19,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5efc28c8{HTTP/1.1, (http/1.1)}{localhost:34497} 2024-11-18T18:48:19,652 INFO [Time-limited test {}] server.Server(415): Started @177041ms 2024-11-18T18:48:19,653 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:19,697 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:19,697 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1342656795_22 at /127.0.0.1:39172 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39172 dst: /127.0.0.1:35681 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:19,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@797b72a2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:19,700 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24cc63e3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:19,700 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:19,700 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44ef9e03{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:19,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d8c7947{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:19,702 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:19,702 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:48:19,702 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid abc66573-6f57-4e0c-9d18-6c9c8ea3650f) service to localhost/127.0.0.1:39915 2024-11-18T18:48:19,702 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:19,702 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data3/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:19,702 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data4/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:19,702 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:19,714 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:19,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:19,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:19,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:19,719 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:48:19,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78dbe58a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:19,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38a0c8b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:19,818 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58681746{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/java.io.tmpdir/jetty-localhost-37357-hadoop-hdfs-3_4_1-tests_jar-_-any-12571055248044706609/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:19,818 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e65fed6{HTTP/1.1, 
(http/1.1)}{localhost:37357} 2024-11-18T18:48:19,819 INFO [Time-limited test {}] server.Server(415): Started @177207ms 2024-11-18T18:48:19,820 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:20,064 WARN [Thread-1416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc38c6491e9bc0f26 with lease ID 0xe496cdd5796c8b56: from storage DS-84706612-96b9-4d56-b728-74db80946132 node DatanodeRegistration(127.0.0.1:36921, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=33819, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:20,067 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc38c6491e9bc0f26 with lease ID 0xe496cdd5796c8b56: from storage DS-272b0e75-82c9-4cdc-bea2-b6a94e82f201 node DatanodeRegistration(127.0.0.1:36921, datanodeUuid=c2ebdf43-6772-4926-abea-cada2d72e8dc, infoPort=33819, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:20,253 WARN [Thread-1436 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc93302cfbc0d52a9 with lease ID 0xe496cdd5796c8b57: from storage DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7 node DatanodeRegistration(127.0.0.1:33221, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=42237, infoSecurePort=0, ipcPort=37611, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc93302cfbc0d52a9 with lease ID 0xe496cdd5796c8b57: from storage DS-c0cfb89c-7dfc-4d1d-9836-5620b552decb node DatanodeRegistration(127.0.0.1:33221, datanodeUuid=abc66573-6f57-4e0c-9d18-6c9c8ea3650f, infoPort=42237, infoSecurePort=0, ipcPort=37611, storageInfo=lv=-57;cid=testClusterID;nsid=954678236;c=1731955678813), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:20,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:20,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:20,841 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-18T18:48:20,843 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-18T18:48:20,844 ERROR [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35681,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:20,844 WARN [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35681,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:20,844 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C46215%2C1731955680775:(num 1731955695418) roll requested 2024-11-18T18:48:20,844 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:20,854 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 newFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:20,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:20,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:20,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:20,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:20,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:20,855 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:20,855 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35681,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:20,855 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35681,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:20,855 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:20,856 WARN [IPC Server handler 2 on default port 39915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-18T18:48:20,856 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 after 1ms 2024-11-18T18:48:20,860 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42237:42237),(127.0.0.1/127.0.0.1:33819:33819)] 2024-11-18T18:48:20,860 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 is not closed yet, will try archiving it next time 2024-11-18T18:48:21,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:21,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:22,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:22,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:22,862 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:22,869 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 newFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:22,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:22,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:22,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:22,870 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:22,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:22,871 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:22,873 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33819:33819),(127.0.0.1/127.0.0.1:42237:42237)] 2024-11-18T18:48:22,873 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 is not closed yet, will try archiving it next time 2024-11-18T18:48:22,873 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 is not closed yet, will try archiving it next time 2024-11-18T18:48:22,873 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:22,873 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:22,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741838_1019 (size=1264) 2024-11-18T18:48:22,875 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741838_1019 (size=1264) 2024-11-18T18:48:22,875 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 after 1ms 2024-11-18T18:48:22,875 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:22,875 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 is not closed yet, will try archiving it next time 2024-11-18T18:48:22,883 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731955682385/Put/vlen=218/seqid=0] 2024-11-18T18:48:22,883 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731955692098/Put/vlen=1045/seqid=0] 2024-11-18T18:48:22,883 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955681284 2024-11-18T18:48:22,883 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:22,883 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:22,884 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 after 1ms 2024-11-18T18:48:22,884 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:22,887 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731955695418/Put/vlen=1045/seqid=0] 2024-11-18T18:48:22,887 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731955697438/Put/vlen=1045/seqid=0] 2024-11-18T18:48:22,887 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 2024-11-18T18:48:22,887 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:22,887 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:22,888 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 after 0ms 2024-11-18T18:48:22,888 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955700844 2024-11-18T18:48:22,890 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731955700844/Put/vlen=1045/seqid=0] 2024-11-18T18:48:22,890 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:22,890 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:22,891 WARN [IPC Server handler 2 on default port 39915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-18T18:48:22,891 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 after 1ms 2024-11-18T18:48:23,266 WARN [ResponseProcessor for block BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:23,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1505339734_22 at /127.0.0.1:59898 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36921:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59898 dst: /127.0.0.1:36921 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36921 remote=/127.0.0.1:59898]. Total timeout mills is 60000, 59603 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:23,267 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 block BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36921,DS-84706612-96b9-4d56-b728-74db80946132,DISK], DatanodeInfoWithStorage[127.0.0.1:33221,DS-5389b11e-6a2d-4643-88ac-ec9b0ed37ac7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36921,DS-84706612-96b9-4d56-b728-74db80946132,DISK]) is bad. 2024-11-18T18:48:23,266 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1505339734_22 at /127.0.0.1:59812 [Receiving block BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:33221:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59812 dst: /127.0.0.1:33221 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:23,268 WARN [DataStreamer for file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 block BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:23,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741839_1022 (size=85) 2024-11-18T18:48:23,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:23,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:24,068 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-18T18:48:24,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:24,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:24,858 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955695418 after 4003ms 2024-11-18T18:48:25,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:25,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:26,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:26,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:26,893 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 after 4003ms 2024-11-18T18:48:26,893 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:26,898 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:26,898 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-18T18:48:26,898 ERROR [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:26,898 WARN [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:26,898 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C46215%2C1731955680775.meta:.meta(num 1731955681877) roll requested 2024-11-18T18:48:26,899 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.meta.1731955706899.meta 2024-11-18T18:48:26,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:26,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:26,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:26,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:26,905 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:26,906 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955706899.meta 2024-11-18T18:48:26,906 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:26,906 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:26,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta 2024-11-18T18:48:26,907 WARN [IPC Server handler 3 on default port 39915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-18T18:48:26,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta after 1ms 2024-11-18T18:48:26,911 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42237:42237),(127.0.0.1/127.0.0.1:33819:33819)] 2024-11-18T18:48:26,911 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta is not closed yet, will try archiving it next time 2024-11-18T18:48:26,928 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/info/a8f106909c6a420691b975351ada65d0 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af./info:regioninfo/1731955682389/Put/seqid=0 2024-11-18T18:48:26,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741841_1025 (size=7125) 2024-11-18T18:48:26,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741841_1025 (size=7125) 2024-11-18T18:48:26,934 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/info/a8f106909c6a420691b975351ada65d0 2024-11-18T18:48:26,952 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/ns/ef1286174a434c6e9d398d0b4ed53bee is 43, key is default/ns:d/1731955681956/Put/seqid=0 2024-11-18T18:48:26,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741842_1026 (size=5153) 2024-11-18T18:48:26,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741842_1026 (size=5153) 2024-11-18T18:48:26,957 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/ns/ef1286174a434c6e9d398d0b4ed53bee 2024-11-18T18:48:26,975 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/table/5855008b859c416c85dced54db733afe is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731955682404/Put/seqid=0 2024-11-18T18:48:26,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741843_1027 (size=5438) 2024-11-18T18:48:26,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741843_1027 (size=5438) 2024-11-18T18:48:26,980 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/table/5855008b859c416c85dced54db733afe 2024-11-18T18:48:26,986 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/info/a8f106909c6a420691b975351ada65d0 as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/info/a8f106909c6a420691b975351ada65d0 2024-11-18T18:48:26,991 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/info/a8f106909c6a420691b975351ada65d0, entries=10, sequenceid=11, filesize=7.0 K 2024-11-18T18:48:26,992 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/ns/ef1286174a434c6e9d398d0b4ed53bee as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/ns/ef1286174a434c6e9d398d0b4ed53bee 2024-11-18T18:48:26,999 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/ns/ef1286174a434c6e9d398d0b4ed53bee, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T18:48:27,000 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/.tmp/table/5855008b859c416c85dced54db733afe as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/table/5855008b859c416c85dced54db733afe 2024-11-18T18:48:27,006 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/table/5855008b859c416c85dced54db733afe, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T18:48:27,008 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 109ms, sequenceid=11, compaction requested=false 2024-11-18T18:48:27,008 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T18:48:27,008 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f1790a755abd4dc512f3c6df9e1a12af 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-18T18:48:27,008 ERROR [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:27,008 WARN [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff-prefix:39fff3b0f89c,46215,1731955680775 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:27,009 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C46215%2C1731955680775:(num 1731955702862) roll requested 2024-11-18T18:48:27,009 INFO [regionserver/39fff3b0f89c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46215%2C1731955680775.1731955707009 2024-11-18T18:48:27,014 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 newFile=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955707009 2024-11-18T18:48:27,014 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,014 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,014 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,014 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,014 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,014 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955707009 2024-11-18T18:48:27,015 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:27,015 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1843742824-172.17.0.2-1731955678813:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:27,015 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:27,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 after 1ms 2024-11-18T18:48:27,019 DEBUG [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42237:42237),(127.0.0.1/127.0.0.1:33819:33819)] 2024-11-18T18:48:27,019 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 to hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs/39fff3b0f89c%2C46215%2C1731955680775.1731955702862 2024-11-18T18:48:27,032 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/.tmp/info/224e64408d594ae780f2a15d708abb09 is 1080, key is row1002/info:/1731955692098/Put/seqid=0 2024-11-18T18:48:27,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741845_1029 (size=9270) 2024-11-18T18:48:27,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741845_1029 (size=9270) 2024-11-18T18:48:27,038 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/.tmp/info/224e64408d594ae780f2a15d708abb09 2024-11-18T18:48:27,045 DEBUG 
[Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/.tmp/info/224e64408d594ae780f2a15d708abb09 as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/info/224e64408d594ae780f2a15d708abb09 2024-11-18T18:48:27,051 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/info/224e64408d594ae780f2a15d708abb09, entries=4, sequenceid=8, filesize=9.1 K 2024-11-18T18:48:27,052 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for f1790a755abd4dc512f3c6df9e1a12af in 44ms, sequenceid=8, compaction requested=false 2024-11-18T18:48:27,052 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f1790a755abd4dc512f3c6df9e1a12af: 2024-11-18T18:48:27,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:48:27,058 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:48:27,058 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:48:27,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:27,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:27,058 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:48:27,059 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:48:27,059 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=207666871, stopped=false 2024-11-18T18:48:27,059 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,42629,1731955680638 2024-11-18T18:48:27,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:27,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:27,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:27,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:27,097 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:48:27,097 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:48:27,097 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:27,097 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:48:27,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:27,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:27,098 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,46215,1731955680775' ***** 2024-11-18T18:48:27,098 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:48:27,098 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:48:27,098 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:48:27,098 INFO [RS:0;39fff3b0f89c:46215 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:48:27,098 INFO [RS:0;39fff3b0f89c:46215 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:48:27,098 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(3091): Received CLOSE for f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:46215. 
2024-11-18T18:48:27,099 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f1790a755abd4dc512f3c6df9e1a12af, disabling compactions & flushes 2024-11-18T18:48:27,099 DEBUG [RS:0;39fff3b0f89c:46215 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:48:27,099 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:27,099 DEBUG [RS:0;39fff3b0f89c:46215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:27,099 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:27,099 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. after waiting 0 ms 2024-11-18T18:48:27,099 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T18:48:27,099 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:48:27,100 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T18:48:27,100 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f1790a755abd4dc512f3c6df9e1a12af=TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af.} 2024-11-18T18:48:27,100 DEBUG [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f1790a755abd4dc512f3c6df9e1a12af 2024-11-18T18:48:27,100 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:48:27,100 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:48:27,100 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:48:27,100 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:48:27,100 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:48:27,108 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/default/TestLogRolling-testLogRollOnPipelineRestart/f1790a755abd4dc512f3c6df9e1a12af/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-18T18:48:27,109 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 2024-11-18T18:48:27,109 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f1790a755abd4dc512f3c6df9e1a12af: Waiting for close lock at 1731955707099Running coprocessor pre-close hooks at 1731955707099Disabling compacts and flushes for region at 1731955707099Disabling writes for close at 1731955707099Writing region close event to WAL at 1731955707100 (+1 ms)Running coprocessor post-close hooks at 1731955707109 (+9 ms)Closed at 1731955707109 2024-11-18T18:48:27,109 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731955682015.f1790a755abd4dc512f3c6df9e1a12af. 
2024-11-18T18:48:27,111 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T18:48:27,112 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:48:27,112 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:48:27,112 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955707100Running coprocessor pre-close hooks at 1731955707100Disabling compacts and flushes for region at 1731955707100Disabling writes for close at 1731955707100Writing region close event to WAL at 1731955707108 (+8 ms)Running coprocessor post-close hooks at 1731955707112 (+4 ms)Closed at 1731955707112 2024-11-18T18:48:27,112 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:48:27,150 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:48:27,206 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:48:27,206 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:48:27,300 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,46215,1731955680775; all regions closed. 2024-11-18T18:48:27,301 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,301 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,301 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,302 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,302 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741840_1023 (size=825) 2024-11-18T18:48:27,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741840_1023 (size=825) 2024-11-18T18:48:27,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:27,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:28,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:28,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:29,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:48:29,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:48:29,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T18:48:29,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:29,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:30,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T18:48:30,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:30,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:30,619 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-18T18:48:30,908 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta after 4002ms 2024-11-18T18:48:30,909 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/WALs/39fff3b0f89c,46215,1731955680775/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta to hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs/39fff3b0f89c%2C46215%2C1731955680775.meta.1731955681877.meta 2024-11-18T18:48:30,912 DEBUG [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs 2024-11-18T18:48:30,912 INFO [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C46215%2C1731955680775.meta:.meta(num 1731955706899) 2024-11-18T18:48:30,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:30,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:30,913 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:30,913 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:30,914 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741844_1028 (size=1162) 2024-11-18T18:48:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741844_1028 (size=1162) 2024-11-18T18:48:30,924 DEBUG [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs 2024-11-18T18:48:30,924 INFO [RS:0;39fff3b0f89c:46215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C46215%2C1731955680775:(num 1731955707009) 2024-11-18T18:48:30,924 DEBUG [RS:0;39fff3b0f89c:46215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:30,924 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:48:30,924 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:48:30,924 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:48:30,925 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:48:30,925 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T18:48:30,925 INFO [RS:0;39fff3b0f89c:46215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46215 2024-11-18T18:48:30,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,46215,1731955680775 2024-11-18T18:48:30,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:48:30,963 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:48:30,972 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,46215,1731955680775] 2024-11-18T18:48:30,980 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,46215,1731955680775 already deleted, retry=false 2024-11-18T18:48:30,980 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,46215,1731955680775 expired; onlineServers=0 2024-11-18T18:48:30,980 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,42629,1731955680638' ***** 2024-11-18T18:48:30,980 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:48:30,980 INFO [M:0;39fff3b0f89c:42629 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:48:30,980 INFO [M:0;39fff3b0f89c:42629 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:48:30,981 DEBUG [M:0;39fff3b0f89c:42629 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:48:30,981 DEBUG [M:0;39fff3b0f89c:42629 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:48:30,981 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T18:48:30,981 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955681066 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955681066,5,FailOnTimeoutGroup] 2024-11-18T18:48:30,981 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955681066 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955681066,5,FailOnTimeoutGroup] 2024-11-18T18:48:30,981 INFO [M:0;39fff3b0f89c:42629 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:48:30,982 INFO [M:0;39fff3b0f89c:42629 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:48:30,982 DEBUG [M:0;39fff3b0f89c:42629 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:48:30,982 INFO [M:0;39fff3b0f89c:42629 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:48:30,982 INFO [M:0;39fff3b0f89c:42629 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:48:30,983 INFO [M:0;39fff3b0f89c:42629 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:48:30,983 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:48:30,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:48:30,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:30,989 DEBUG [M:0;39fff3b0f89c:42629 {}] zookeeper.ZKUtil(347): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:48:30,989 WARN [M:0;39fff3b0f89c:42629 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:48:30,990 INFO [M:0;39fff3b0f89c:42629 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/.lastflushedseqids 2024-11-18T18:48:30,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741846_1030 (size=120) 2024-11-18T18:48:30,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741846_1030 (size=120) 2024-11-18T18:48:30,997 INFO [M:0;39fff3b0f89c:42629 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:48:30,997 INFO [M:0;39fff3b0f89c:42629 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:48:30,997 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:48:30,997 INFO [M:0;39fff3b0f89c:42629 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:30,997 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:30,997 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:48:30,997 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:30,997 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-18T18:48:30,998 ERROR [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData-prefix:39fff3b0f89c,42629,1731955680638 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:30,998 WARN [FSHLog-0-hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData-prefix:39fff3b0f89c,42629,1731955680638 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:30,998 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 39fff3b0f89c%2C42629%2C1731955680638:(num 1731955680893) roll requested 2024-11-18T18:48:30,998 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C42629%2C1731955680638.1731955710998 2024-11-18T18:48:31,004 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,004 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,005 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,005 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,005 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,005 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955710998 2024-11-18T18:48:31,005 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T18:48:31,005 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36505,DS-84706612-96b9-4d56-b728-74db80946132,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T18:48:31,005 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 2024-11-18T18:48:31,006 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33819:33819),(127.0.0.1/127.0.0.1:42237:42237)] 2024-11-18T18:48:31,006 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 is not closed yet, will try archiving it next time 2024-11-18T18:48:31,006 WARN [IPC Server handler 4 on default port 39915 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-18T18:48:31,006 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 after 1ms 2024-11-18T18:48:31,019 DEBUG [M:0;39fff3b0f89c:42629 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/055a22fc2d7b4d43a98e42be79d5b16d is 82, key is hbase:meta,,1/info:regioninfo/1731955681903/Put/seqid=0 2024-11-18T18:48:31,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741848_1033 (size=5672) 2024-11-18T18:48:31,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741848_1033 (size=5672) 2024-11-18T18:48:31,024 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/055a22fc2d7b4d43a98e42be79d5b16d 2024-11-18T18:48:31,044 DEBUG [M:0;39fff3b0f89c:42629 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/024ead3b082144a3bbbfd8d8b9eb9561 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731955682409/Put/seqid=0 2024-11-18T18:48:31,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741849_1034 (size=6118) 2024-11-18T18:48:31,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741849_1034 (size=6118) 2024-11-18T18:48:31,050 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/024ead3b082144a3bbbfd8d8b9eb9561 2024-11-18T18:48:31,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:48:31,072 INFO [RS:0;39fff3b0f89c:46215 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:48:31,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46215-0x101509017550001, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:48:31,072 INFO [RS:0;39fff3b0f89c:46215 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,46215,1731955680775; zookeeper connection closed. 2024-11-18T18:48:31,072 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4433f12a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4433f12a 2024-11-18T18:48:31,072 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:48:31,073 DEBUG [M:0;39fff3b0f89c:42629 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5de6ff835b2242828d09e8e446f95eac is 69, key is 39fff3b0f89c,46215,1731955680775/rs:state/1731955681131/Put/seqid=0 2024-11-18T18:48:31,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741850_1035 (size=5156) 2024-11-18T18:48:31,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741850_1035 (size=5156) 2024-11-18T18:48:31,078 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5de6ff835b2242828d09e8e446f95eac 2024-11-18T18:48:31,097 DEBUG [M:0;39fff3b0f89c:42629 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af70b0b8964b6a98a8af1eff087d73 is 52, key is load_balancer_on/state:d/1731955682010/Put/seqid=0 2024-11-18T18:48:31,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741851_1036 (size=5056) 2024-11-18T18:48:31,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741851_1036 (size=5056) 2024-11-18T18:48:31,102 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af70b0b8964b6a98a8af1eff087d73 2024-11-18T18:48:31,108 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/055a22fc2d7b4d43a98e42be79d5b16d as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/055a22fc2d7b4d43a98e42be79d5b16d 2024-11-18T18:48:31,112 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/055a22fc2d7b4d43a98e42be79d5b16d, entries=8, sequenceid=56, filesize=5.5 K 2024-11-18T18:48:31,113 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/024ead3b082144a3bbbfd8d8b9eb9561 as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/024ead3b082144a3bbbfd8d8b9eb9561 2024-11-18T18:48:31,119 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/024ead3b082144a3bbbfd8d8b9eb9561, entries=6, sequenceid=56, filesize=6.0 K 2024-11-18T18:48:31,120 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5de6ff835b2242828d09e8e446f95eac as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5de6ff835b2242828d09e8e446f95eac 2024-11-18T18:48:31,125 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5de6ff835b2242828d09e8e446f95eac, entries=1, sequenceid=56, filesize=5.0 K 2024-11-18T18:48:31,126 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/22af70b0b8964b6a98a8af1eff087d73 as hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22af70b0b8964b6a98a8af1eff087d73 2024-11-18T18:48:31,131 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/22af70b0b8964b6a98a8af1eff087d73, entries=1, sequenceid=56, filesize=4.9 K 2024-11-18T18:48:31,132 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false 2024-11-18T18:48:31,134 INFO [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:48:31,134 DEBUG [M:0;39fff3b0f89c:42629 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955710997Disabling compacts and flushes for region at 1731955710997Disabling writes for close at 1731955710997Obtaining lock to block concurrent updates at 1731955710997Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955710997Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731955710998 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955711006 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955711006Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955711018 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955711018Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955711029 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955711044 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955711044Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955711055 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955711072 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955711073 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955711082 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955711096 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955711096Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@695d52d: reopening flushed file at 1731955711107 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7955e9ed: reopening flushed file at 1731955711112 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36c1a784: reopening flushed file at 1731955711119 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d60d530: reopening flushed file at 1731955711125 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false at 1731955711132 (+7 ms)Writing region close event to WAL at 1731955711134 (+2 ms)Closed at 1731955711134 2024-11-18T18:48:31,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:48:31,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33221 is added to blk_1073741847_1031 (size=757) 2024-11-18T18:48:31,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36921 is added to blk_1073741847_1031 (size=757) 2024-11-18T18:48:31,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:31,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:32,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,138 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,142 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,142 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:32,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:32,644 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:48:32,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:32,669 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:33,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T18:48:33,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:33,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:34,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:34,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:35,008 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 after 4003ms 2024-11-18T18:48:35,009 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/WALs/39fff3b0f89c,42629,1731955680638/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 to hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/oldWALs/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 2024-11-18T18:48:35,017 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/MasterData/oldWALs/39fff3b0f89c%2C42629%2C1731955680638.1731955680893 to hdfs://localhost:39915/user/jenkins/test-data/5c933c57-f313-6db2-e423-ba60d44afeff/oldWALs/39fff3b0f89c%2C42629%2C1731955680638.1731955680893$masterlocalwal$ 2024-11-18T18:48:35,017 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:48:35,017 INFO [M:0;39fff3b0f89c:42629 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T18:48:35,017 INFO [M:0;39fff3b0f89c:42629 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42629 2024-11-18T18:48:35,017 INFO [M:0;39fff3b0f89c:42629 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:48:35,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:48:35,156 INFO [M:0;39fff3b0f89c:42629 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:48:35,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42629-0x101509017550000, quorum=127.0.0.1:52973, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:48:35,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58681746{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:35,191 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e65fed6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:35,191 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:35,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38a0c8b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:35,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78dbe58a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:35,194 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:35,194 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:48:35,194 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid abc66573-6f57-4e0c-9d18-6c9c8ea3650f) service to localhost/127.0.0.1:39915 2024-11-18T18:48:35,194 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:35,195 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data3/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:35,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data4/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:35,196 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:35,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@119a914{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:35,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5efc28c8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:35,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:35,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ebe8434{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:35,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55678be6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:35,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:48:35,202 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:48:35,202 WARN [BP-1843742824-172.17.0.2-1731955678813 heartbeating to localhost/127.0.0.1:39915 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1843742824-172.17.0.2-1731955678813 (Datanode Uuid c2ebdf43-6772-4926-abea-cada2d72e8dc) service to localhost/127.0.0.1:39915 2024-11-18T18:48:35,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:48:35,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data1/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:35,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/cluster_38752716-c6af-5b4e-b3b8-67c23f109758/data/data2/current/BP-1843742824-172.17.0.2-1731955678813 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:48:35,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:48:35,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e2e6f70{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:48:35,210 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13f6b910{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:48:35,210 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:48:35,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@721d81ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:48:35,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28b84096{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir/,STOPPED} 2024-11-18T18:48:35,216 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:48:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:48:35,241 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39915 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39915 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39915 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:39915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=256 (was 337), ProcessCount=11 (was 11), AvailableMemoryMB=4360 (was 4158) - AvailableMemoryMB LEAK? 
- 2024-11-18T18:48:35,247 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=256, ProcessCount=11, AvailableMemoryMB=4360 2024-11-18T18:48:35,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.log.dir so I do NOT create it in target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d61b808a-2184-acea-cbf7-e416e9952655/hadoop.tmp.dir so I do NOT create it in target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1, deleteOnExit=true 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/test.cache.data in system properties and HBase conf 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:48:35,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:48:35,248 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:48:35,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:48:35,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:48:35,261 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:48:35,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:35,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:35,546 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:35,550 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:35,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:35,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:35,551 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:48:35,551 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:35,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59ed7094{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:35,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c929db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:35,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f373d12{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/java.io.tmpdir/jetty-localhost-45783-hadoop-hdfs-3_4_1-tests_jar-_-any-9224756710856137892/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:48:35,646 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ceaf000{HTTP/1.1, (http/1.1)}{localhost:45783} 2024-11-18T18:48:35,646 INFO [Time-limited test {}] server.Server(415): Started @193035ms 2024-11-18T18:48:35,657 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:48:35,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:35,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:35,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:35,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:35,859 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:48:35,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@374344fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:35,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c078c5d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:35,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@730a9bd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/java.io.tmpdir/jetty-localhost-38255-hadoop-hdfs-3_4_1-tests_jar-_-any-18422479803211216016/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:35,955 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35966ec2{HTTP/1.1, (http/1.1)}{localhost:38255} 2024-11-18T18:48:35,955 INFO [Time-limited test {}] server.Server(415): Started @193344ms 2024-11-18T18:48:35,956 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:35,980 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:48:35,983 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:48:35,984 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:48:35,984 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:48:35,984 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:48:35,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@303cb3d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:48:35,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56a581cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:48:36,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a51b9ff{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/java.io.tmpdir/jetty-localhost-39703-hadoop-hdfs-3_4_1-tests_jar-_-any-4118853521693015547/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:48:36,077 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a5f6046{HTTP/1.1, (http/1.1)}{localhost:39703} 2024-11-18T18:48:36,077 INFO [Time-limited test {}] server.Server(415): Started @193466ms 2024-11-18T18:48:36,078 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:48:36,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:36,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:36,850 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data1/current/BP-547660075-172.17.0.2-1731955715271/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:36,852 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data2/current/BP-547660075-172.17.0.2-1731955715271/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:36,871 WARN [Thread-1620 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:36,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacc81ec6ba949d6f with lease ID 0x7470e707ff3a303d: Processing first storage report for DS-0f2e87fb-3df3-4554-8213-01d1ffc6a539 from datanode DatanodeRegistration(127.0.0.1:45525, datanodeUuid=0fe2d0b7-fde5-4cbf-8ea1-279c566b11c5, infoPort=42109, infoSecurePort=0, ipcPort=35335, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271) 2024-11-18T18:48:36,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacc81ec6ba949d6f with lease ID 0x7470e707ff3a303d: from storage DS-0f2e87fb-3df3-4554-8213-01d1ffc6a539 node DatanodeRegistration(127.0.0.1:45525, datanodeUuid=0fe2d0b7-fde5-4cbf-8ea1-279c566b11c5, infoPort=42109, infoSecurePort=0, ipcPort=35335, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:36,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xacc81ec6ba949d6f with lease ID 0x7470e707ff3a303d: Processing first storage report for DS-7e170bfe-9744-47a0-8f9a-9b19a5787b34 from datanode DatanodeRegistration(127.0.0.1:45525, datanodeUuid=0fe2d0b7-fde5-4cbf-8ea1-279c566b11c5, infoPort=42109, infoSecurePort=0, ipcPort=35335, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271) 2024-11-18T18:48:36,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xacc81ec6ba949d6f with lease ID 0x7470e707ff3a303d: from storage DS-7e170bfe-9744-47a0-8f9a-9b19a5787b34 node DatanodeRegistration(127.0.0.1:45525, datanodeUuid=0fe2d0b7-fde5-4cbf-8ea1-279c566b11c5, infoPort=42109, infoSecurePort=0, ipcPort=35335, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:36,973 WARN [Thread-1667 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data3/current/BP-547660075-172.17.0.2-1731955715271/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:36,973 WARN [Thread-1668 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data4/current/BP-547660075-172.17.0.2-1731955715271/current, will proceed with Du for space computation calculation, 2024-11-18T18:48:36,994 WARN [Thread-1643 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:48:36,996 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8953bbd956e6a67 with lease ID 0x7470e707ff3a303e: Processing first storage report for DS-25050913-cef8-428f-828b-affb61505fb1 from datanode DatanodeRegistration(127.0.0.1:36111, datanodeUuid=0e7559e8-5a3f-4348-b36c-c02bb27ceeed, infoPort=42737, infoSecurePort=0, ipcPort=42151, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271) 2024-11-18T18:48:36,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8953bbd956e6a67 with lease ID 0x7470e707ff3a303e: from storage DS-25050913-cef8-428f-828b-affb61505fb1 node DatanodeRegistration(127.0.0.1:36111, datanodeUuid=0e7559e8-5a3f-4348-b36c-c02bb27ceeed, infoPort=42737, infoSecurePort=0, ipcPort=42151, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:36,996 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8953bbd956e6a67 with lease ID 0x7470e707ff3a303e: Processing first storage report for DS-c44288d6-9b7e-415b-8af1-2b56d6e1ecb7 from datanode DatanodeRegistration(127.0.0.1:36111, datanodeUuid=0e7559e8-5a3f-4348-b36c-c02bb27ceeed, infoPort=42737, infoSecurePort=0, ipcPort=42151, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271) 2024-11-18T18:48:36,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8953bbd956e6a67 with lease ID 0x7470e707ff3a303e: from storage DS-c44288d6-9b7e-415b-8af1-2b56d6e1ecb7 node DatanodeRegistration(127.0.0.1:36111, datanodeUuid=0e7559e8-5a3f-4348-b36c-c02bb27ceeed, infoPort=42737, infoSecurePort=0, ipcPort=42151, storageInfo=lv=-57;cid=testClusterID;nsid=1956068572;c=1731955715271), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:48:37,005 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5 2024-11-18T18:48:37,008 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/zookeeper_0, clientPort=64583, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:48:37,009 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64583 2024-11-18T18:48:37,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:48:37,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:48:37,022 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc with version=8 2024-11-18T18:48:37,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:48:37,024 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:48:37,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,024 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:48:37,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,024 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:48:37,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:48:37,025 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:48:37,025 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37157 2024-11-18T18:48:37,027 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37157 connecting to ZooKeeper ensemble=127.0.0.1:64583 2024-11-18T18:48:37,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:371570x0, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T18:48:37,097 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37157-0x1015090a5780000 connected 2024-11-18T18:48:37,181 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,184 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:37,188 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc, hbase.cluster.distributed=false 2024-11-18T18:48:37,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:48:37,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37157 2024-11-18T18:48:37,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37157 2024-11-18T18:48:37,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37157 2024-11-18T18:48:37,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37157 2024-11-18T18:48:37,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37157 2024-11-18T18:48:37,210 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:48:37,211 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:48:37,211 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:48:37,212 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46055 2024-11-18T18:48:37,213 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46055 connecting to ZooKeeper ensemble=127.0.0.1:64583 2024-11-18T18:48:37,214 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,215 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460550x0, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:48:37,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46055-0x1015090a5780001 connected 2024-11-18T18:48:37,223 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:48:37,223 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:48:37,223 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:48:37,224 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:48:37,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:48:37,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46055 2024-11-18T18:48:37,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46055 2024-11-18T18:48:37,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46055 2024-11-18T18:48:37,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46055 2024-11-18T18:48:37,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46055 2024-11-18T18:48:37,238 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:37157 2024-11-18T18:48:37,238 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:37,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:37,248 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:48:37,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,256 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:48:37,256 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,37157,1731955717024 from backup master directory 2024-11-18T18:48:37,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:37,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:48:37,264 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
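The watcher callbacks above are plain ZooKeeper notifications on /hbase/master and /hbase/backup-masters as this master registers itself and deletes its backup-master znode. As a rough, standalone illustration of what those znodes contain, the sketch below uses the stock ZooKeeper client to list them; the 127.0.0.1:2181 quorum address is a placeholder for the 127.0.0.1:64583 ensemble in this run, and the /hbase/master payload is protobuf-encoded, so it is only reported by size.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          // Wait for SyncConnected, mirroring the "Received ZooKeeper Event ...
          // state=SyncConnected" lines in this log.
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        try {
          List<String> backups = zk.getChildren("/hbase/backup-masters", false);
          System.out.println("backup masters: " + backups);
          byte[] raw = zk.getData("/hbase/master", false, null);
          System.out.println("/hbase/master payload: " + (raw == null ? 0 : raw.length) + " bytes");
        } finally {
          zk.close();
        }
      }
    }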
2024-11-18T18:48:37,264 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,269 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/hbase.id] with ID: f6df36ba-eb54-429b-9a4a-648612f1c8ff 2024-11-18T18:48:37,269 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/.tmp/hbase.id 2024-11-18T18:48:37,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:48:37,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:48:37,278 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/.tmp/hbase.id]:[hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/hbase.id] 2024-11-18T18:48:37,291 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:37,291 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T18:48:37,293 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
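The FSUtils lines above spell out the publish pattern for hbase.id: write the cluster ID to a file under .tmp, then rename it to its final path so readers never see a partially written file. A minimal sketch of the same write-then-rename pattern with the stock Hadoop FileSystem API follows; the NameNode URI, paths, and UUID value are placeholders, not the ones from this run.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder NameNode
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/hbase/.tmp/hbase.id");
        Path dst = new Path("/hbase/hbase.id");

        // Write the ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("00000000-0000-0000-0000-000000000000".getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it into place with a rename, so readers only ever see a complete file.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
        fs.close();
      }
    }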
2024-11-18T18:48:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:48:37,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:48:37,316 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:48:37,317 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:48:37,317 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:48:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:48:37,324 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store 2024-11-18T18:48:37,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:48:37,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:48:37,332 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:37,333 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:48:37,333 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:37,333 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:37,333 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:48:37,333 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:48:37,333 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
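The 'master:store' descriptor dumped above is an ordinary table descriptor with four column families and per-family settings (VERSIONS, IN_MEMORY, BLOOMFILTER, BLOCKSIZE, DATA_BLOCK_ENCODING). The master builds it internally, but the same shape can be written with the public HBase 2.x client builders; in the sketch below the table name 'demo:store' and the reduced set of two families are placeholders, not the real master:store definition.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info': 3 versions, in-memory, 8 KB blocks, ROWCOL bloom, ROW_INDEX_V1 encoding,
        // matching the 'info' family settings printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        // 'proc': 1 version, 64 KB blocks, ROW bloom, no encoding.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBlocksize(64 * 1024)
            .setBloomFilterType(BloomType.ROW)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }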
2024-11-18T18:48:37,333 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955717333Disabling compacts and flushes for region at 1731955717333Disabling writes for close at 1731955717333Writing region close event to WAL at 1731955717333Closed at 1731955717333 2024-11-18T18:48:37,334 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/.initializing 2024-11-18T18:48:37,334 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/WALs/39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,336 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C37157%2C1731955717024, suffix=, logDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/WALs/39fff3b0f89c,37157,1731955717024, archiveDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/oldWALs, maxLogs=10 2024-11-18T18:48:37,337 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C37157%2C1731955717024.1731955717336 2024-11-18T18:48:37,341 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/WALs/39fff3b0f89c,37157,1731955717024/39fff3b0f89c%2C37157%2C1731955717024.1731955717336 2024-11-18T18:48:37,342 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:42109:42109)] 2024-11-18T18:48:37,343 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:48:37,343 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:37,343 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,343 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:48:37,345 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:37,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:48:37,347 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:37,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:48:37,349 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:37,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:48:37,351 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:48:37,351 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,352 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,352 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,354 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,354 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,355 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:48:37,357 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:48:37,359 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:48:37,359 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832031, jitterRate=0.05798310041427612}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:48:37,360 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955717343Initializing all the Stores at 1731955717344 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955717344Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955717344Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955717344Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955717344Cleaning up temporary data from old regions at 1731955717354 (+10 ms)Region opened successfully at 1731955717360 (+6 ms) 2024-11-18T18:48:37,361 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:48:37,364 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3c53c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:48:37,365 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:48:37,365 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:48:37,365 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:48:37,366 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:48:37,366 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:48:37,366 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:48:37,367 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:48:37,369 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:48:37,370 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:48:37,379 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:48:37,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:48:37,380 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:48:37,389 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:48:37,389 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:48:37,391 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:48:37,397 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:48:37,399 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:48:37,406 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:48:37,409 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:48:37,414 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:48:37,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:37,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:37,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:37,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:48:37,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,423 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,37157,1731955717024, sessionid=0x1015090a5780000, setting cluster-up flag (Was=false) 2024-11-18T18:48:37,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,464 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:48:37,465 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:37,505 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:48:37,507 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:37,508 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:48:37,510 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:37,510 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:48:37,510 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
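The StochasticLoadBalancer line above reports its effective tuning (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000) alongside the balancer slop of 0.2. These values come from configuration; the sketch below shows one plausible way to set them on a Configuration object before starting a cluster. The property names are the stock HBase keys for these settings, but they are worth double-checking against the version actually in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regions.slop", 0.2f);                                // allowed load imbalance
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);    // hard cap on search steps
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);      // steps scaled by region count
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L); // per-run time budget, ms
        return conf;
      }
    }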
2024-11-18T18:48:37,510 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,37157,1731955717024 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:48:37,512 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955747513 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:48:37,513 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,514 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:48:37,514 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:48:37,514 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:37,514 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:48:37,514 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:48:37,514 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:48:37,514 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:48:37,514 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955717514,5,FailOnTimeoutGroup] 2024-11-18T18:48:37,515 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955717514,5,FailOnTimeoutGroup] 2024-11-18T18:48:37,515 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,515 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:48:37,515 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,515 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:37,515 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,515 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:48:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:48:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:48:37,525 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:48:37,525 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc 2024-11-18T18:48:37,528 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(746): ClusterId : f6df36ba-eb54-429b-9a4a-648612f1c8ff 2024-11-18T18:48:37,528 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:48:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:48:37,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:48:37,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:37,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:48:37,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:48:37,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:37,534 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:48:37,535 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:48:37,535 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:37,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:48:37,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:48:37,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:37,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:48:37,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:48:37,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:37,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:37,539 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:48:37,540 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:48:37,540 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:48:37,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740 2024-11-18T18:48:37,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740 2024-11-18T18:48:37,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:48:37,541 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:48:37,542 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
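For context on the FlushLargeStoresPolicy message just above: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the per-family flush lower bound falls back to the region memstore flush heap size divided by the number of column families. The figures in this log are consistent with that: master:store has four families and, with the default 128 MB flush size, 134217728 / 4 = 33554432 bytes, the 32.0 M and flushSizeLowerBound=33554432 reported when that region opened; hbase:meta also has four families (info, ns, rep_barrier, table), and its 16.0 M bound (16777216 bytes) correspondingly implies a 64 MB flush heap size, since 16777216 * 4 = 67108864.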
2024-11-18T18:48:37,543 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:48:37,545 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:48:37,545 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743884, jitterRate=-0.05410274863243103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:48:37,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955717532Initializing all the Stores at 1731955717532Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955717532Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955717532Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955717532Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955717532Cleaning up temporary data from old regions at 1731955717541 (+9 ms)Region opened successfully at 1731955717546 (+5 ms) 2024-11-18T18:48:37,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:48:37,546 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:48:37,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:48:37,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:48:37,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:48:37,547 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:48:37,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955717546Disabling compacts and flushes for region at 1731955717546Disabling writes for close at 1731955717546Writing region close event 
to WAL at 1731955717546Closed at 1731955717546 2024-11-18T18:48:37,548 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:48:37,548 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:37,548 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:48:37,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:48:37,548 DEBUG [RS:0;39fff3b0f89c:46055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@178178ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:48:37,549 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:48:37,550 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:48:37,560 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:46055 2024-11-18T18:48:37,560 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:48:37,560 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:48:37,560 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T18:48:37,561 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,37157,1731955717024 with port=46055, startcode=1731955717210 2024-11-18T18:48:37,561 DEBUG [RS:0;39fff3b0f89c:46055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:48:37,563 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57599, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:48:37,563 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37157 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,563 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37157 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,565 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc 2024-11-18T18:48:37,565 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35791 2024-11-18T18:48:37,565 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:48:37,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:48:37,573 DEBUG [RS:0;39fff3b0f89c:46055 {}] zookeeper.ZKUtil(111): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,573 WARN [RS:0;39fff3b0f89c:46055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:48:37,573 INFO [RS:0;39fff3b0f89c:46055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:37,573 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,573 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,46055,1731955717210] 2024-11-18T18:48:37,576 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:48:37,577 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:48:37,578 INFO [RS:0;39fff3b0f89c:46055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:48:37,578 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:37,578 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:48:37,579 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:48:37,579 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,579 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,580 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:48:37,580 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:48:37,580 DEBUG [RS:0;39fff3b0f89c:46055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,580 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,46055,1731955717210-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:48:37,594 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:48:37,594 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,46055,1731955717210-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,594 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,594 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.Replication(171): 39fff3b0f89c,46055,1731955717210 started 2024-11-18T18:48:37,607 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:37,607 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,46055,1731955717210, RpcServer on 39fff3b0f89c/172.17.0.2:46055, sessionid=0x1015090a5780001 2024-11-18T18:48:37,608 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:48:37,608 DEBUG [RS:0;39fff3b0f89c:46055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,608 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,46055,1731955717210' 2024-11-18T18:48:37,608 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:48:37,608 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:48:37,609 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:48:37,609 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:48:37,609 DEBUG [RS:0;39fff3b0f89c:46055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,609 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,46055,1731955717210' 2024-11-18T18:48:37,609 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:48:37,609 DEBUG 
[RS:0;39fff3b0f89c:46055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:48:37,610 DEBUG [RS:0;39fff3b0f89c:46055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:48:37,610 INFO [RS:0;39fff3b0f89c:46055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:48:37,610 INFO [RS:0;39fff3b0f89c:46055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:48:37,701 WARN [39fff3b0f89c:37157 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:48:37,713 INFO [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C46055%2C1731955717210, suffix=, logDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210, archiveDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs, maxLogs=32 2024-11-18T18:48:37,714 INFO [RS:0;39fff3b0f89c:46055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46055%2C1731955717210.1731955717713 2024-11-18T18:48:37,724 INFO [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955717713 2024-11-18T18:48:37,729 DEBUG [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42109:42109),(127.0.0.1/127.0.0.1:42737:42737)] 2024-11-18T18:48:37,951 DEBUG [39fff3b0f89c:37157 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:48:37,952 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:37,956 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,46055,1731955717210, state=OPENING 2024-11-18T18:48:38,006 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:48:38,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:38,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:48:38,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:38,015 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:48:38,015 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:38,016 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,46055,1731955717210}] 2024-11-18T18:48:38,172 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:48:38,176 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33537, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:48:38,184 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:48:38,185 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:48:38,187 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C46055%2C1731955717210.meta, suffix=.meta, logDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210, archiveDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs, maxLogs=32 2024-11-18T18:48:38,188 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46055%2C1731955717210.meta.1731955718188.meta 2024-11-18T18:48:38,194 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.meta.1731955718188.meta 2024-11-18T18:48:38,199 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:42109:42109)] 2024-11-18T18:48:38,200 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:48:38,201 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:48:38,201 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:48:38,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:48:38,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:48:38,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:38,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:38,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:48:38,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:48:38,205 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:38,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:38,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:48:38,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:48:38,206 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:38,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:48:38,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:48:38,207 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:48:38,207 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:38,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T18:48:38,207 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:48:38,208 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740 2024-11-18T18:48:38,209 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740 2024-11-18T18:48:38,210 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:48:38,210 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:48:38,210 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:48:38,211 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:48:38,212 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722088, jitterRate=-0.08181807398796082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:48:38,212 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:48:38,213 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955718201Writing region info on filesystem at 1731955718202 (+1 ms)Initializing all the Stores at 1731955718203 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955718203Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955718203Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955718203Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955718203Cleaning up temporary data from old regions at 1731955718210 (+7 ms)Running coprocessor post-open hooks at 1731955718212 (+2 ms)Region opened successfully at 1731955718213 (+1 ms) 2024-11-18T18:48:38,214 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955718171 2024-11-18T18:48:38,216 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:48:38,216 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:48:38,217 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:38,218 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,46055,1731955717210, state=OPEN 2024-11-18T18:48:38,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:48:38,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:48:38,255 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,46055,1731955717210 2024-11-18T18:48:38,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:38,255 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:48:38,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:48:38,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,46055,1731955717210 in 240 msec 2024-11-18T18:48:38,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:48:38,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 710 msec 2024-11-18T18:48:38,262 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:48:38,262 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:48:38,263 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:48:38,264 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,46055,1731955717210, seqNum=-1] 2024-11-18T18:48:38,264 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:48:38,265 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46795, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:48:38,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 761 msec 2024-11-18T18:48:38,272 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955718272, completionTime=-1 2024-11-18T18:48:38,272 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:48:38,272 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955778274 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955838274 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:37157, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:38,274 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:38,275 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:48:38,276 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.015sec 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:48:38,279 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:48:38,282 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:48:38,282 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:48:38,282 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,37157,1731955717024-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:48:38,329 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5254105f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:38,329 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,37157,-1 for getting cluster id 2024-11-18T18:48:38,329 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:48:38,331 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f6df36ba-eb54-429b-9a4a-648612f1c8ff' 2024-11-18T18:48:38,331 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:48:38,331 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f6df36ba-eb54-429b-9a4a-648612f1c8ff" 2024-11-18T18:48:38,332 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74632920, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:38,332 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,37157,-1] 2024-11-18T18:48:38,332 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:48:38,332 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:48:38,333 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:48:38,334 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f658bc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:48:38,335 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:48:38,336 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,46055,1731955717210, seqNum=-1] 2024-11-18T18:48:38,337 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:48:38,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:48:38,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:38,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:48:38,343 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:48:38,344 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T18:48:38,345 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 39fff3b0f89c,37157,1731955717024 2024-11-18T18:48:38,345 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@be2620b 2024-11-18T18:48:38,345 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T18:48:38,346 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T18:48:38,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T18:48:38,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T18:48:38,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:48:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:38,350 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T18:48:38,350 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:48:38,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-18T18:48:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:48:38,351 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T18:48:38,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741835_1011 (size=405) 2024-11-18T18:48:38,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741835_1011 (size=405) 2024-11-18T18:48:38,360 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7b69efae1404fc678996560628a5ba95, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc 2024-11-18T18:48:38,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741836_1012 (size=88) 2024-11-18T18:48:38,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741836_1012 (size=88) 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7b69efae1404fc678996560628a5ba95, disabling compactions & flushes 2024-11-18T18:48:38,368 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. after waiting 0 ms 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:48:38,368 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:48:38,368 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7b69efae1404fc678996560628a5ba95: Waiting for close lock at 1731955718368Disabling compacts and flushes for region at 1731955718368Disabling writes for close at 1731955718368Writing region close event to WAL at 1731955718368Closed at 1731955718368 2024-11-18T18:48:38,370 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T18:48:38,370 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731955718370"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955718370"}]},"ts":"1731955718370"} 2024-11-18T18:48:38,373 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T18:48:38,374 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T18:48:38,374 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955718374"}]},"ts":"1731955718374"} 2024-11-18T18:48:38,376 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-18T18:48:38,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b69efae1404fc678996560628a5ba95, ASSIGN}] 2024-11-18T18:48:38,378 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b69efae1404fc678996560628a5ba95, ASSIGN 2024-11-18T18:48:38,379 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b69efae1404fc678996560628a5ba95, ASSIGN; state=OFFLINE, location=39fff3b0f89c,46055,1731955717210; forceNewPlan=false, retain=false 2024-11-18T18:48:38,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:38,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-18T18:48:38,530 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b69efae1404fc678996560628a5ba95, regionState=OPENING, regionLocation=39fff3b0f89c,46055,1731955717210
2024-11-18T18:48:38,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b69efae1404fc678996560628a5ba95, ASSIGN because future has completed
2024-11-18T18:48:38,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b69efae1404fc678996560628a5ba95, server=39fff3b0f89c,46055,1731955717210}]
2024-11-18T18:48:38,700 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:48:38,701 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7b69efae1404fc678996560628a5ba95, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.', STARTKEY => '', ENDKEY => ''}
2024-11-18T18:48:38,701 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,701 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-18T18:48:38,702 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,702 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,704 INFO [StoreOpener-7b69efae1404fc678996560628a5ba95-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,706 INFO [StoreOpener-7b69efae1404fc678996560628a5ba95-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b69efae1404fc678996560628a5ba95 columnFamilyName info
2024-11-18T18:48:38,707 DEBUG [StoreOpener-7b69efae1404fc678996560628a5ba95-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-18T18:48:38,707 INFO [StoreOpener-7b69efae1404fc678996560628a5ba95-1 {}] regionserver.HStore(327): Store=7b69efae1404fc678996560628a5ba95/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-18T18:48:38,707 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,708 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,709 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,709 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,709 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,712 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,714 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-18T18:48:38,715 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7b69efae1404fc678996560628a5ba95; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722880, jitterRate=-0.0808115303516388}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-18T18:48:38,715 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7b69efae1404fc678996560628a5ba95
2024-11-18T18:48:38,716 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7b69efae1404fc678996560628a5ba95: Running coprocessor pre-open hook at 1731955718702Writing region info on filesystem at 1731955718702Initializing all the Stores at 1731955718703 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955718703Cleaning up temporary data from old regions at 1731955718709 (+6 ms)Running coprocessor post-open hooks at 1731955718715 (+6 ms)Region opened successfully at 1731955718716 (+1 ms)
2024-11-18T18:48:38,717 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95., pid=6, masterSystemTime=1731955718691
2024-11-18T18:48:38,719 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:48:38,720 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:48:38,720 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b69efae1404fc678996560628a5ba95, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,46055,1731955717210
2024-11-18T18:48:38,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b69efae1404fc678996560628a5ba95, server=39fff3b0f89c,46055,1731955717210 because future has completed
2024-11-18T18:48:38,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-18T18:48:38,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7b69efae1404fc678996560628a5ba95, server=39fff3b0f89c,46055,1731955717210 in 186 msec
2024-11-18T18:48:38,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-18T18:48:38,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7b69efae1404fc678996560628a5ba95, ASSIGN in 351 msec
2024-11-18T18:48:38,730 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-18T18:48:38,730 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955718730"}]},"ts":"1731955718730"}
2024-11-18T18:48:38,732 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-11-18T18:48:38,733 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-18T18:48:38,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 386 msec
2024-11-18T18:48:39,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-18T18:48:39,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-18T18:48:39,036 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry:
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:48:39,036 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T18:48:39,037 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:39,037 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T18:48:39,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:39,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:40,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:40,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:41,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:41,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:42,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:42,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:43,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,203 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,204 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:43,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:43,740 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:48:43,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:48:43,775 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T18:48:43,775 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-18T18:48:44,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:44,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:45,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:45,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:46,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:46,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:47,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:47,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:48,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:48,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:48:48,430 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T18:48:48,431 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-18T18:48:48,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:48,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:48:48,441 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95., hostname=39fff3b0f89c,46055,1731955717210, seqNum=2] 2024-11-18T18:48:48,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:48,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:48,454 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T18:48:48,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T18:48:48,455 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T18:48:48,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T18:48:48,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-18T18:48:48,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:48:48,617 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 7b69efae1404fc678996560628a5ba95 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T18:48:48,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/4f48f378d5da4dbcbe73cfc934c0e0c4 is 1080, key is row0001/info:/1731955728442/Put/seqid=0 2024-11-18T18:48:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741837_1013 (size=6033) 2024-11-18T18:48:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741837_1013 (size=6033) 2024-11-18T18:48:48,640 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/4f48f378d5da4dbcbe73cfc934c0e0c4 2024-11-18T18:48:48,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/4f48f378d5da4dbcbe73cfc934c0e0c4 as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4 2024-11-18T18:48:48,652 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4, entries=1, sequenceid=5, filesize=5.9 K 2024-11-18T18:48:48,653 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 36ms, sequenceid=5, compaction requested=false 2024-11-18T18:48:48,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 7b69efae1404fc678996560628a5ba95: 2024-11-18T18:48:48,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:48:48,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-18T18:48:48,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-18T18:48:48,659 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T18:48:48,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-11-18T18:48:48,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec 2024-11-18T18:48:49,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:49,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:50,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:50,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:51,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:51,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:52,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:52,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:53,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:53,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:54,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:54,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:55,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:55,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:56,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:56,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:57,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:57,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:58,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:58,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T18:48:58,549 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T18:48:58,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:48:58,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T18:48:58,557 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T18:48:58,558 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T18:48:58,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T18:48:58,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-18T18:48:58,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:48:58,712 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 7b69efae1404fc678996560628a5ba95 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T18:48:58,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/5f68229293ea42cbaad03d3ead437f8a is 1080, key is row0002/info:/1731955738551/Put/seqid=0 2024-11-18T18:48:58,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741838_1014 (size=6033) 2024-11-18T18:48:58,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741838_1014 (size=6033) 2024-11-18T18:48:58,726 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/5f68229293ea42cbaad03d3ead437f8a 2024-11-18T18:48:58,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/5f68229293ea42cbaad03d3ead437f8a as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a 2024-11-18T18:48:58,739 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a, entries=1, sequenceid=9, filesize=5.9 K 2024-11-18T18:48:58,740 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 28ms, sequenceid=9, compaction requested=false 2024-11-18T18:48:58,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 7b69efae1404fc678996560628a5ba95: 2024-11-18T18:48:58,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:48:58,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-18T18:48:58,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-18T18:48:58,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-18T18:48:58,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-18T18:48:58,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec 2024-11-18T18:48:59,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:48:59,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:48:59,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 after 68069ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T18:48:59,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta after 68053ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T18:49:00,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:00,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:01,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:01,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:02,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:02,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:03,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:03,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:04,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:04,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:05,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:05,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:06,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:06,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:07,005 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T18:49:07,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:07,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:08,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:08,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T18:49:08,569 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T18:49:08,571 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46055%2C1731955717210.1731955748571 2024-11-18T18:49:08,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:08,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:08,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:08,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:08,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:08,579 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955717713 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955748571 2024-11-18T18:49:08,580 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42109:42109),(127.0.0.1/127.0.0.1:42737:42737)] 2024-11-18T18:49:08,580 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955717713 is not closed yet, will try archiving it next time 2024-11-18T18:49:08,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741833_1009 (size=5546) 2024-11-18T18:49:08,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:49:08,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741833_1009 (size=5546) 2024-11-18T18:49:08,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:49:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-18T18:49:08,583 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T18:49:08,585 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T18:49:08,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T18:49:08,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-18T18:49:08,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:08,739 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 7b69efae1404fc678996560628a5ba95 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T18:49:08,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/2d9c81a90d9042c8bcdf29a8e5b52931 is 1080, key is row0003/info:/1731955748570/Put/seqid=0 2024-11-18T18:49:08,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741840_1016 (size=6033) 2024-11-18T18:49:08,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741840_1016 (size=6033) 2024-11-18T18:49:08,754 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/2d9c81a90d9042c8bcdf29a8e5b52931 2024-11-18T18:49:08,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/2d9c81a90d9042c8bcdf29a8e5b52931 as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931 2024-11-18T18:49:08,770 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931, entries=1, sequenceid=13, filesize=5.9 K 2024-11-18T18:49:08,772 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 34ms, sequenceid=13, compaction requested=true 2024-11-18T18:49:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 7b69efae1404fc678996560628a5ba95: 2024-11-18T18:49:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:08,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-18T18:49:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-18T18:49:08,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-18T18:49:08,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-11-18T18:49:08,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec 2024-11-18T18:49:09,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:09,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:10,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:10,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:11,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:11,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:12,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:12,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:13,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:13,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:14,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:14,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:15,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:15,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:16,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:16,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:17,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:17,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:18,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:18,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T18:49:18,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-18T18:49:18,620 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T18:49:18,620 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T18:49:18,623 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T18:49:18,623 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7b69efae1404fc678996560628a5ba95/info is initiating minor compaction (all files)
2024-11-18T18:49:18,624 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-18T18:49:18,624 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-18T18:49:18,624 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7b69efae1404fc678996560628a5ba95/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:49:18,624 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931] into tmpdir=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp, totalSize=17.7 K
2024-11-18T18:49:18,626 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4f48f378d5da4dbcbe73cfc934c0e0c4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731955728442
2024-11-18T18:49:18,627 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5f68229293ea42cbaad03d3ead437f8a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731955738551
2024-11-18T18:49:18,628 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2d9c81a90d9042c8bcdf29a8e5b52931, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731955748570
2024-11-18T18:49:18,640 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 7b69efae1404fc678996560628a5ba95#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-18T18:49:18,640 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/42284d560a8d462b80720a5aed1e931e is 1080, key is row0001/info:/1731955728442/Put/seqid=0
2024-11-18T18:49:18,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741841_1017 (size=8296)
2024-11-18T18:49:18,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741841_1017 (size=8296)
2024-11-18T18:49:18,650 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/42284d560a8d462b80720a5aed1e931e as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/42284d560a8d462b80720a5aed1e931e
2024-11-18T18:49:18,656 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7b69efae1404fc678996560628a5ba95/info of 7b69efae1404fc678996560628a5ba95 into 42284d560a8d462b80720a5aed1e931e(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-18T18:49:18,657 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7b69efae1404fc678996560628a5ba95:
2024-11-18T18:49:18,659 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46055%2C1731955717210.1731955758659
2024-11-18T18:49:18,664 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T18:49:18,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T18:49:18,665 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T18:49:18,665 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T18:49:18,665 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T18:49:18,665 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955748571 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955758659
2024-11-18T18:49:18,666 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42109:42109),(127.0.0.1/127.0.0.1:42737:42737)]
2024-11-18T18:49:18,666 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955748571 is not closed yet, will try archiving it next time
2024-11-18T18:49:18,666 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955717713 to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs/39fff3b0f89c%2C46055%2C1731955717210.1731955717713
2024-11-18T18:49:18,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741839_1015 (size=2520)
2024-11-18T18:49:18,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741839_1015 (size=2520)
2024-11-18T18:49:18,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T18:49:18,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T18:49:18,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-18T18:49:18,669 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T18:49:18,669 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T18:49:18,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T18:49:18,759 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-18T18:49:18,759 INFO [master/39fff3b0f89c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-18T18:49:18,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46055 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-18T18:49:18,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:49:18,823 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 7b69efae1404fc678996560628a5ba95 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T18:49:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/f0db9a272f1d47248acc35a06a1b0b7f is 1080, key is row0000/info:/1731955758658/Put/seqid=0
2024-11-18T18:49:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741843_1019 (size=6033)
2024-11-18T18:49:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741843_1019 (size=6033)
2024-11-18T18:49:18,840 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/f0db9a272f1d47248acc35a06a1b0b7f
2024-11-18T18:49:18,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/f0db9a272f1d47248acc35a06a1b0b7f as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/f0db9a272f1d47248acc35a06a1b0b7f
2024-11-18T18:49:18,855 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/f0db9a272f1d47248acc35a06a1b0b7f, entries=1, sequenceid=18, filesize=5.9 K
2024-11-18T18:49:18,856 INFO [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 33ms, sequenceid=18, compaction requested=false
2024-11-18T18:49:18,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 7b69efae1404fc678996560628a5ba95:
2024-11-18T18:49:18,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.
2024-11-18T18:49:18,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-18T18:49:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-18T18:49:18,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-18T18:49:18,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec 2024-11-18T18:49:18,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec 2024-11-18T18:49:19,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:19,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:20,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:20,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:21,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:21,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:22,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:22,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:23,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:23,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:23,701 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7b69efae1404fc678996560628a5ba95, had cached 0 bytes from a total of 14329 2024-11-18T18:49:24,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:24,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:25,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:25,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:26,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:26,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:27,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:27,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:28,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:28,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:28,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T18:49:28,760 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T18:49:28,766 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C46055%2C1731955717210.1731955768766 2024-11-18T18:49:28,808 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:28,809 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:28,809 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:28,809 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:28,810 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:28,810 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955758659 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955768766 2024-11-18T18:49:28,812 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42737:42737),(127.0.0.1/127.0.0.1:42109:42109)] 2024-11-18T18:49:28,812 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955758659 is not closed yet, will try archiving it next time 2024-11-18T18:49:28,812 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/WALs/39fff3b0f89c,46055,1731955717210/39fff3b0f89c%2C46055%2C1731955717210.1731955748571 to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs/39fff3b0f89c%2C46055%2C1731955717210.1731955748571 2024-11-18T18:49:28,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:49:28,813 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:49:28,813 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:49:28,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:28,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:28,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741842_1018 (size=2026) 2024-11-18T18:49:28,814 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:49:28,814 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:49:28,814 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1686605989, stopped=false 2024-11-18T18:49:28,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741842_1018 (size=2026) 2024-11-18T18:49:28,815 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,37157,1731955717024 2024-11-18T18:49:28,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:49:28,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:49:28,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:28,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:28,885 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:49:28,886 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:49:28,886 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:49:28,886 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:28,887 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:49:28,887 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:49:28,887 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,46055,1731955717210' ***** 2024-11-18T18:49:28,887 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:49:28,888 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:49:28,888 INFO [RS:0;39fff3b0f89c:46055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:49:28,888 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:49:28,888 INFO [RS:0;39fff3b0f89c:46055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:49:28,888 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(3091): Received CLOSE for 7b69efae1404fc678996560628a5ba95 2024-11-18T18:49:28,889 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,46055,1731955717210 2024-11-18T18:49:28,889 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:49:28,889 INFO [RS:0;39fff3b0f89c:46055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:46055. 2024-11-18T18:49:28,889 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7b69efae1404fc678996560628a5ba95, disabling compactions & flushes 2024-11-18T18:49:28,889 DEBUG [RS:0;39fff3b0f89c:46055 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:49:28,889 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:28,889 DEBUG [RS:0;39fff3b0f89c:46055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:28,889 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:28,889 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. after waiting 0 ms 2024-11-18T18:49:28,889 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:49:28,889 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:28,889 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:49:28,890 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T18:49:28,890 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:49:28,890 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7b69efae1404fc678996560628a5ba95 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T18:49:28,890 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T18:49:28,890 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7b69efae1404fc678996560628a5ba95=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.} 2024-11-18T18:49:28,890 DEBUG [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7b69efae1404fc678996560628a5ba95 2024-11-18T18:49:28,890 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:49:28,890 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:49:28,890 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:49:28,890 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:49:28,890 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:49:28,890 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-18T18:49:28,895 DEBUG 
[RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/02e1a70e4569461594299d2a7548a9cf is 1080, key is row0001/info:/1731955768763/Put/seqid=0 2024-11-18T18:49:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741845_1021 (size=6033) 2024-11-18T18:49:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741845_1021 (size=6033) 2024-11-18T18:49:28,900 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/02e1a70e4569461594299d2a7548a9cf 2024-11-18T18:49:28,906 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/.tmp/info/02e1a70e4569461594299d2a7548a9cf as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/02e1a70e4569461594299d2a7548a9cf 2024-11-18T18:49:28,910 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/info/9480262504874d8b96bea4e51cf71957 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95./info:regioninfo/1731955718720/Put/seqid=0 2024-11-18T18:49:28,911 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/02e1a70e4569461594299d2a7548a9cf, entries=1, sequenceid=22, filesize=5.9 K 2024-11-18T18:49:28,912 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 23ms, sequenceid=22, compaction requested=true 2024-11-18T18:49:28,912 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4, 
hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931] to archive 2024-11-18T18:49:28,913 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T18:49:28,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741846_1022 (size=7308) 2024-11-18T18:49:28,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741846_1022 (size=7308) 2024-11-18T18:49:28,915 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/info/9480262504874d8b96bea4e51cf71957 2024-11-18T18:49:28,915 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4 to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/4f48f378d5da4dbcbe73cfc934c0e0c4 2024-11-18T18:49:28,917 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/5f68229293ea42cbaad03d3ead437f8a 2024-11-18T18:49:28,918 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931 to hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/info/2d9c81a90d9042c8bcdf29a8e5b52931 2024-11-18T18:49:28,918 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39fff3b0f89c:37157 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-18T18:49:28,919 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4f48f378d5da4dbcbe73cfc934c0e0c4=6033, 5f68229293ea42cbaad03d3ead437f8a=6033, 2d9c81a90d9042c8bcdf29a8e5b52931=6033] 2024-11-18T18:49:28,924 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7b69efae1404fc678996560628a5ba95/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-18T18:49:28,925 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 2024-11-18T18:49:28,925 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7b69efae1404fc678996560628a5ba95: Waiting for close lock at 1731955768889Running coprocessor pre-close hooks at 1731955768889Disabling compacts and flushes for region at 1731955768889Disabling writes for close at 1731955768889Obtaining lock to block concurrent updates at 1731955768890 (+1 ms)Preparing flush snapshotting stores in 7b69efae1404fc678996560628a5ba95 at 1731955768890Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731955768890Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. at 1731955768891 (+1 ms)Flushing 7b69efae1404fc678996560628a5ba95/info: creating writer at 1731955768891Flushing 7b69efae1404fc678996560628a5ba95/info: appending metadata at 1731955768894 (+3 ms)Flushing 7b69efae1404fc678996560628a5ba95/info: closing flushed file at 1731955768894Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28b966c4: reopening flushed file at 1731955768905 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7b69efae1404fc678996560628a5ba95 in 23ms, sequenceid=22, compaction requested=true at 1731955768912 (+7 ms)Writing region close event to WAL at 1731955768919 (+7 ms)Running coprocessor post-close hooks at 1731955768924 (+5 ms)Closed at 1731955768925 (+1 ms) 2024-11-18T18:49:28,925 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731955718347.7b69efae1404fc678996560628a5ba95. 
2024-11-18T18:49:28,935 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/ns/65b2e29ea5f64e38ac1ea47a9c4446be is 43, key is default/ns:d/1731955718266/Put/seqid=0 2024-11-18T18:49:28,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741847_1023 (size=5153) 2024-11-18T18:49:28,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741847_1023 (size=5153) 2024-11-18T18:49:28,940 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/ns/65b2e29ea5f64e38ac1ea47a9c4446be 2024-11-18T18:49:28,960 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/table/df9337f402164e789d8417b6fee1a0ac is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731955718730/Put/seqid=0 2024-11-18T18:49:28,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741848_1024 (size=5508) 2024-11-18T18:49:28,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741848_1024 (size=5508) 2024-11-18T18:49:28,965 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/table/df9337f402164e789d8417b6fee1a0ac 2024-11-18T18:49:28,971 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/info/9480262504874d8b96bea4e51cf71957 as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/info/9480262504874d8b96bea4e51cf71957 2024-11-18T18:49:28,976 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/info/9480262504874d8b96bea4e51cf71957, entries=10, sequenceid=11, filesize=7.1 K 2024-11-18T18:49:28,977 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/ns/65b2e29ea5f64e38ac1ea47a9c4446be as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/ns/65b2e29ea5f64e38ac1ea47a9c4446be 2024-11-18T18:49:28,982 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/ns/65b2e29ea5f64e38ac1ea47a9c4446be, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T18:49:28,983 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/.tmp/table/df9337f402164e789d8417b6fee1a0ac as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/table/df9337f402164e789d8417b6fee1a0ac 2024-11-18T18:49:28,988 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/table/df9337f402164e789d8417b6fee1a0ac, entries=2, sequenceid=11, filesize=5.4 K 2024-11-18T18:49:28,989 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 99ms, sequenceid=11, compaction requested=false 2024-11-18T18:49:28,993 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T18:49:28,993 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:49:28,993 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:49:28,993 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955768890Running coprocessor pre-close hooks at 1731955768890Disabling compacts and flushes for region at 1731955768890Disabling writes for close at 1731955768890Obtaining lock to block concurrent updates at 1731955768890Preparing flush snapshotting stores in 1588230740 at 1731955768890Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731955768891 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731955768892 (+1 ms)Flushing 1588230740/info: creating writer at 1731955768892Flushing 1588230740/info: appending metadata at 1731955768910 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731955768910Flushing 1588230740/ns: creating writer at 1731955768920 (+10 ms)Flushing 1588230740/ns: appending metadata at 1731955768935 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731955768935Flushing 1588230740/table: creating writer at 1731955768944 (+9 ms)Flushing 1588230740/table: appending metadata at 1731955768960 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731955768960Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63742582: reopening flushed file at 1731955768970 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@aa400a4: reopening flushed file at 1731955768976 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4367a551: reopening flushed file at 1731955768982 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 99ms, sequenceid=11, compaction requested=false at 1731955768989 (+7 ms)Writing region close event to WAL at 1731955768990 (+1 ms)Running coprocessor post-close hooks at 1731955768993 (+3 ms)Closed at 1731955768993 2024-11-18T18:49:28,993 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:49:29,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:49:29,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:49:29,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T18:49:29,090 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,46055,1731955717210; all regions closed. 2024-11-18T18:49:29,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,091 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,092 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,092 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741834_1010 (size=3306) 2024-11-18T18:49:29,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741834_1010 (size=3306) 2024-11-18T18:49:29,101 DEBUG [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs 2024-11-18T18:49:29,101 INFO [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C46055%2C1731955717210.meta:.meta(num 1731955718188) 2024-11-18T18:49:29,101 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741844_1020 (size=1252) 2024-11-18T18:49:29,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741844_1020 (size=1252) 2024-11-18T18:49:29,109 DEBUG [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to 
/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/oldWALs 2024-11-18T18:49:29,109 INFO [RS:0;39fff3b0f89c:46055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C46055%2C1731955717210:(num 1731955768766) 2024-11-18T18:49:29,109 DEBUG [RS:0;39fff3b0f89c:46055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:29,109 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:49:29,109 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:49:29,109 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:49:29,109 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:49:29,110 INFO [RS:0;39fff3b0f89c:46055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46055 2024-11-18T18:49:29,110 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:49:29,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:49:29,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,46055,1731955717210 2024-11-18T18:49:29,119 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:49:29,128 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,46055,1731955717210] 2024-11-18T18:49:29,136 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,46055,1731955717210 already deleted, retry=false 2024-11-18T18:49:29,136 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,46055,1731955717210 expired; onlineServers=0 2024-11-18T18:49:29,136 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,37157,1731955717024' ***** 2024-11-18T18:49:29,136 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:49:29,136 INFO [M:0;39fff3b0f89c:37157 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:49:29,136 INFO [M:0;39fff3b0f89c:37157 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:49:29,136 DEBUG [M:0;39fff3b0f89c:37157 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:49:29,136 DEBUG [M:0;39fff3b0f89c:37157 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:49:29,136 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T18:49:29,136 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955717514 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955717514,5,FailOnTimeoutGroup] 2024-11-18T18:49:29,136 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955717514 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955717514,5,FailOnTimeoutGroup] 2024-11-18T18:49:29,137 INFO [M:0;39fff3b0f89c:37157 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:49:29,137 INFO [M:0;39fff3b0f89c:37157 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:49:29,137 DEBUG [M:0;39fff3b0f89c:37157 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:49:29,137 INFO [M:0;39fff3b0f89c:37157 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:49:29,137 INFO [M:0;39fff3b0f89c:37157 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:49:29,137 INFO [M:0;39fff3b0f89c:37157 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:49:29,137 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:49:29,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:49:29,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:29,144 DEBUG [M:0;39fff3b0f89c:37157 {}] zookeeper.ZKUtil(347): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:49:29,144 WARN [M:0;39fff3b0f89c:37157 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:49:29,145 INFO [M:0;39fff3b0f89c:37157 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/.lastflushedseqids 2024-11-18T18:49:29,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741849_1025 (size=130) 2024-11-18T18:49:29,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741849_1025 (size=130) 2024-11-18T18:49:29,151 INFO [M:0;39fff3b0f89c:37157 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:49:29,151 INFO [M:0;39fff3b0f89c:37157 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:49:29,151 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:49:29,151 INFO [M:0;39fff3b0f89c:37157 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:29,151 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:29,151 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:49:29,151 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:29,151 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-18T18:49:29,165 DEBUG [M:0;39fff3b0f89c:37157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec5e96292eb44f0eb3e9738b0a9bc4d7 is 82, key is hbase:meta,,1/info:regioninfo/1731955718217/Put/seqid=0 2024-11-18T18:49:29,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741850_1026 (size=5672) 2024-11-18T18:49:29,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741850_1026 (size=5672) 2024-11-18T18:49:29,170 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec5e96292eb44f0eb3e9738b0a9bc4d7 2024-11-18T18:49:29,191 DEBUG [M:0;39fff3b0f89c:37157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1bad363c99e4cb3840ee714107d6f6f is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731955718734/Put/seqid=0 2024-11-18T18:49:29,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741851_1027 (size=7823) 2024-11-18T18:49:29,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741851_1027 (size=7823) 2024-11-18T18:49:29,196 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1bad363c99e4cb3840ee714107d6f6f 2024-11-18T18:49:29,200 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c1bad363c99e4cb3840ee714107d6f6f 2024-11-18T18:49:29,213 DEBUG [M:0;39fff3b0f89c:37157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99cfb0bbca824717b12cac0a31d7e9fe is 69, key is 39fff3b0f89c,46055,1731955717210/rs:state/1731955717563/Put/seqid=0 2024-11-18T18:49:29,217 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741852_1028 (size=5156) 2024-11-18T18:49:29,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741852_1028 (size=5156) 2024-11-18T18:49:29,218 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99cfb0bbca824717b12cac0a31d7e9fe 2024-11-18T18:49:29,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:49:29,228 INFO [RS:0;39fff3b0f89c:46055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:49:29,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46055-0x1015090a5780001, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:49:29,228 INFO [RS:0;39fff3b0f89c:46055 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,46055,1731955717210; zookeeper connection closed. 2024-11-18T18:49:29,228 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6cd41e34 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6cd41e34 2024-11-18T18:49:29,228 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:49:29,237 DEBUG [M:0;39fff3b0f89c:37157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009cad85ed264e32a5786a38a4696316 is 52, key is load_balancer_on/state:d/1731955718342/Put/seqid=0 2024-11-18T18:49:29,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741853_1029 (size=5056) 2024-11-18T18:49:29,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741853_1029 (size=5056) 2024-11-18T18:49:29,241 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009cad85ed264e32a5786a38a4696316 2024-11-18T18:49:29,246 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec5e96292eb44f0eb3e9738b0a9bc4d7 as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec5e96292eb44f0eb3e9738b0a9bc4d7 2024-11-18T18:49:29,250 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec5e96292eb44f0eb3e9738b0a9bc4d7, entries=8, sequenceid=121, filesize=5.5 K 2024-11-18T18:49:29,251 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c1bad363c99e4cb3840ee714107d6f6f as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c1bad363c99e4cb3840ee714107d6f6f 2024-11-18T18:49:29,257 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c1bad363c99e4cb3840ee714107d6f6f 2024-11-18T18:49:29,257 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c1bad363c99e4cb3840ee714107d6f6f, entries=14, sequenceid=121, filesize=7.6 K 2024-11-18T18:49:29,258 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/99cfb0bbca824717b12cac0a31d7e9fe as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/99cfb0bbca824717b12cac0a31d7e9fe 2024-11-18T18:49:29,262 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/99cfb0bbca824717b12cac0a31d7e9fe, entries=1, sequenceid=121, filesize=5.0 K 2024-11-18T18:49:29,263 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/009cad85ed264e32a5786a38a4696316 as hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/009cad85ed264e32a5786a38a4696316 2024-11-18T18:49:29,267 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35791/user/jenkins/test-data/9eb81762-a580-f165-0808-b15d05627fcc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/009cad85ed264e32a5786a38a4696316, entries=1, sequenceid=121, filesize=4.9 K 2024-11-18T18:49:29,268 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false 2024-11-18T18:49:29,270 INFO [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T18:49:29,270 DEBUG [M:0;39fff3b0f89c:37157 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955769151Disabling compacts and flushes for region at 1731955769151Disabling writes for close at 1731955769151Obtaining lock to block concurrent updates at 1731955769151Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955769151Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731955769152 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955769153 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955769153Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955769165 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955769165Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955769174 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955769190 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955769190Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955769200 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955769213 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955769213Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955769222 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955769236 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955769236Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@334c1daa: reopening flushed file at 1731955769245 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a55b321: reopening flushed file at 1731955769251 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ee2e9cc: reopening flushed file at 1731955769257 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d5a0462: reopening flushed file at 1731955769262 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=121, compaction requested=false at 1731955769268 (+6 ms)Writing region close event to WAL at 1731955769270 (+2 ms)Closed at 1731955769270 2024-11-18T18:49:29,270 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,270 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,270 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,270 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,271 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:49:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45525 is added to blk_1073741830_1006 (size=53035) 2024-11-18T18:49:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36111 is added to blk_1073741830_1006 (size=53035) 2024-11-18T18:49:29,273 INFO [M:0;39fff3b0f89c:37157 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T18:49:29,273 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:49:29,273 INFO [M:0;39fff3b0f89c:37157 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37157 2024-11-18T18:49:29,273 INFO [M:0;39fff3b0f89c:37157 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:49:29,378 INFO [M:0;39fff3b0f89c:37157 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:49:29,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:49:29,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37157-0x1015090a5780000, quorum=127.0.0.1:64583, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:49:29,411 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a51b9ff{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:49:29,412 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a5f6046{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:49:29,412 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:49:29,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56a581cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:49:29,412 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@303cb3d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,STOPPED} 2024-11-18T18:49:29,416 WARN [BP-547660075-172.17.0.2-1731955715271 heartbeating to localhost/127.0.0.1:35791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:49:29,416 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:49:29,416 WARN [BP-547660075-172.17.0.2-1731955715271 heartbeating to localhost/127.0.0.1:35791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-547660075-172.17.0.2-1731955715271 (Datanode Uuid 0e7559e8-5a3f-4348-b36c-c02bb27ceeed) service to localhost/127.0.0.1:35791 2024-11-18T18:49:29,416 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:49:29,417 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data3/current/BP-547660075-172.17.0.2-1731955715271 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:49:29,417 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data4/current/BP-547660075-172.17.0.2-1731955715271 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:49:29,418 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:49:29,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@730a9bd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:49:29,421 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35966ec2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:49:29,421 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:49:29,421 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c078c5d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:49:29,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@374344fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,STOPPED} 2024-11-18T18:49:29,423 WARN [BP-547660075-172.17.0.2-1731955715271 heartbeating to localhost/127.0.0.1:35791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:49:29,423 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:49:29,423 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:49:29,423 WARN [BP-547660075-172.17.0.2-1731955715271 heartbeating to localhost/127.0.0.1:35791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-547660075-172.17.0.2-1731955715271 (Datanode Uuid 0fe2d0b7-fde5-4cbf-8ea1-279c566b11c5) service to localhost/127.0.0.1:35791 2024-11-18T18:49:29,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data1/current/BP-547660075-172.17.0.2-1731955715271 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:49:29,424 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/cluster_5f36b8af-1bfd-b2d8-282e-e225af9357e1/data/data2/current/BP-547660075-172.17.0.2-1731955715271 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:49:29,424 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:49:29,429 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f373d12{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:49:29,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ceaf000{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:49:29,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:49:29,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c929db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:49:29,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59ed7094{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir/,STOPPED} 2024-11-18T18:49:29,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:49:29,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:49:29,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:29,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:29,459 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/39fff3b0f89c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35791 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:35791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=210 (was 256), ProcessCount=11 (was 11), AvailableMemoryMB=4366 (was 4360) - AvailableMemoryMB LEAK? - 2024-11-18T18:49:29,465 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=210, ProcessCount=11, AvailableMemoryMB=4366 2024-11-18T18:49:29,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.log.dir so I do NOT create it in target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/79eed651-a3fd-a9c1-2a19-49d19b8b9ac5/hadoop.tmp.dir so I do NOT create it in target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2, deleteOnExit=true 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/test.cache.data in system properties and HBase conf 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:49:29,466 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T18:49:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:49:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:49:29,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:49:29,480 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:49:29,583 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:49:29,804 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:49:29,808 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:49:29,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:49:29,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:49:29,809 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:49:29,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:49:29,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c05323f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:49:29,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6150e164{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:49:29,901 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3225d099{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/java.io.tmpdir/jetty-localhost-36481-hadoop-hdfs-3_4_1-tests_jar-_-any-15115167710752971305/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:49:29,902 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c2acbdd{HTTP/1.1, (http/1.1)}{localhost:36481} 2024-11-18T18:49:29,902 INFO [Time-limited test {}] server.Server(415): Started @247291ms 2024-11-18T18:49:29,913 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:49:30,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:49:30,116 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:49:30,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:49:30,117 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:49:30,117 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:49:30,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@638450ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:49:30,118 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ef16bcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:49:30,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@389941f3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/java.io.tmpdir/jetty-localhost-37167-hadoop-hdfs-3_4_1-tests_jar-_-any-7823289776726189906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:49:30,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1370e23f{HTTP/1.1, (http/1.1)}{localhost:37167} 2024-11-18T18:49:30,210 INFO [Time-limited test {}] server.Server(415): Started @247599ms 2024-11-18T18:49:30,211 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:49:30,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:49:30,241 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:49:30,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:49:30,242 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:49:30,242 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:49:30,242 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e0ccf7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:49:30,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51d47a1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:49:30,334 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cfb8c65{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/java.io.tmpdir/jetty-localhost-43731-hadoop-hdfs-3_4_1-tests_jar-_-any-10941175508192739933/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:49:30,335 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33cca936{HTTP/1.1, (http/1.1)}{localhost:43731} 2024-11-18T18:49:30,335 INFO [Time-limited test {}] server.Server(415): Started @247724ms 2024-11-18T18:49:30,336 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:49:30,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:30,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:31,113 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data1/current/BP-695063187-172.17.0.2-1731955769491/current, will proceed with Du for space computation calculation, 2024-11-18T18:49:31,114 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data2/current/BP-695063187-172.17.0.2-1731955769491/current, will proceed with Du for space computation calculation, 2024-11-18T18:49:31,133 WARN [Thread-1936 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:49:31,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xebce6c9ecfa74887 with lease ID 0x6508f68420e1300d: Processing first storage report for DS-30fe8604-a507-40d0-a921-44da1c9b9981 from datanode DatanodeRegistration(127.0.0.1:39443, datanodeUuid=eef88227-c2e2-46b2-80ca-e86d1a417b16, infoPort=45575, infoSecurePort=0, ipcPort=41449, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491) 2024-11-18T18:49:31,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebce6c9ecfa74887 with lease ID 0x6508f68420e1300d: from storage DS-30fe8604-a507-40d0-a921-44da1c9b9981 node DatanodeRegistration(127.0.0.1:39443, datanodeUuid=eef88227-c2e2-46b2-80ca-e86d1a417b16, infoPort=45575, infoSecurePort=0, ipcPort=41449, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T18:49:31,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xebce6c9ecfa74887 with lease ID 0x6508f68420e1300d: Processing first storage report for DS-4498dc31-fe77-4b15-b0e4-6a77d10514e5 from datanode DatanodeRegistration(127.0.0.1:39443, datanodeUuid=eef88227-c2e2-46b2-80ca-e86d1a417b16, infoPort=45575, infoSecurePort=0, ipcPort=41449, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491) 2024-11-18T18:49:31,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebce6c9ecfa74887 with lease ID 0x6508f68420e1300d: from storage DS-4498dc31-fe77-4b15-b0e4-6a77d10514e5 node DatanodeRegistration(127.0.0.1:39443, datanodeUuid=eef88227-c2e2-46b2-80ca-e86d1a417b16, infoPort=45575, infoSecurePort=0, ipcPort=41449, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:49:31,247 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data3/current/BP-695063187-172.17.0.2-1731955769491/current, will proceed with Du for space computation calculation, 2024-11-18T18:49:31,248 WARN [Thread-1984 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data4/current/BP-695063187-172.17.0.2-1731955769491/current, will proceed with Du for space computation calculation, 2024-11-18T18:49:31,262 WARN [Thread-1959 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:49:31,264 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9e03f5ae3ece1d4 with lease ID 0x6508f68420e1300e: Processing first storage report for DS-a89bad21-37ce-4c9a-9911-dfd1b8231e65 from datanode DatanodeRegistration(127.0.0.1:36343, datanodeUuid=0a40822d-7b5f-4544-beb6-7b538ec2ee0a, infoPort=45361, infoSecurePort=0, ipcPort=34261, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491) 2024-11-18T18:49:31,264 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9e03f5ae3ece1d4 with lease ID 0x6508f68420e1300e: from storage DS-a89bad21-37ce-4c9a-9911-dfd1b8231e65 node DatanodeRegistration(127.0.0.1:36343, datanodeUuid=0a40822d-7b5f-4544-beb6-7b538ec2ee0a, infoPort=45361, infoSecurePort=0, ipcPort=34261, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:49:31,264 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9e03f5ae3ece1d4 with lease ID 0x6508f68420e1300e: Processing first storage report for DS-b92214c3-7b06-4cee-a501-d154cbf7f03c from datanode DatanodeRegistration(127.0.0.1:36343, datanodeUuid=0a40822d-7b5f-4544-beb6-7b538ec2ee0a, infoPort=45361, infoSecurePort=0, ipcPort=34261, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491) 2024-11-18T18:49:31,264 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9e03f5ae3ece1d4 with lease ID 0x6508f68420e1300e: from storage DS-b92214c3-7b06-4cee-a501-d154cbf7f03c node DatanodeRegistration(127.0.0.1:36343, datanodeUuid=0a40822d-7b5f-4544-beb6-7b538ec2ee0a, infoPort=45361, infoSecurePort=0, ipcPort=34261, storageInfo=lv=-57;cid=testClusterID;nsid=1122233650;c=1731955769491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:49:31,366 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6 2024-11-18T18:49:31,385 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/zookeeper_0, clientPort=50680, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:49:31,387 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50680 2024-11-18T18:49:31,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,390 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:49:31,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:49:31,402 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864 with version=8 2024-11-18T18:49:31,402 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:49:31,404 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:49:31,404 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:49:31,405 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38439 2024-11-18T18:49:31,406 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38439 connecting to ZooKeeper ensemble=127.0.0.1:50680 2024-11-18T18:49:31,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:31,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:31,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:384390x0, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:49:31,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38439-0x101509179df0000 connected 2024-11-18T18:49:31,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,592 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:49:31,594 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864, hbase.cluster.distributed=false 2024-11-18T18:49:31,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:49:31,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38439 2024-11-18T18:49:31,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38439 2024-11-18T18:49:31,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38439 2024-11-18T18:49:31,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38439 2024-11-18T18:49:31,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38439 2024-11-18T18:49:31,609 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:49:31,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,609 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:49:31,609 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:49:31,610 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:49:31,610 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:49:31,610 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:49:31,610 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38211 2024-11-18T18:49:31,612 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38211 connecting to ZooKeeper ensemble=127.0.0.1:50680 2024-11-18T18:49:31,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,613 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382110x0, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:49:31,624 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382110x0, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:49:31,624 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38211-0x101509179df0001 connected 2024-11-18T18:49:31,624 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:49:31,626 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:49:31,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:49:31,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:49:31,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38211 2024-11-18T18:49:31,629 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38211 2024-11-18T18:49:31,630 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38211 2024-11-18T18:49:31,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38211 2024-11-18T18:49:31,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38211 2024-11-18T18:49:31,650 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:38439 2024-11-18T18:49:31,650 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:49:31,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:49:31,657 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:49:31,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,666 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:49:31,666 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,38439,1731955771403 from backup master directory 2024-11-18T18:49:31,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:49:31,673 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:49:31,674 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:49:31,674 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,678 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/hbase.id] with ID: 84ea109b-6f1c-45cd-9e14-97d978493b61 2024-11-18T18:49:31,678 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/.tmp/hbase.id 2024-11-18T18:49:31,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:49:31,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741826_1002 (size=42) 2024-11-18T18:49:31,684 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/.tmp/hbase.id]:[hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/hbase.id] 2024-11-18T18:49:31,694 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:31,695 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T18:49:31,696 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
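The two util.FSUtils records just above show the usual publish-via-rename pattern: the cluster ID is first written to a .tmp path and only then moved to its final hbase.id location, so a reader never observes a partially written file. A minimal sketch of the same pattern against the plain Hadoop FileSystem API follows; the namenode URI, paths, and ID value are illustrative rather than taken from this run, and hadoop-common is assumed on the classpath.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative namenode URI; the run above uses hdfs://localhost:38889.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path tmp = new Path("/hbase/.tmp/hbase.id");   // temporary location
    Path target = new Path("/hbase/hbase.id");     // final location
    String clusterId = UUID.randomUUID().toString();
    // 1. Write the full contents to the temporary path.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // 2. Publish by renaming into place, so readers only ever see a complete file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
    System.out.println("published cluster ID " + clusterId + " at " + target);
  }
}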
2024-11-18T18:49:31,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:49:31,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:49:31,714 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:49:31,715 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:49:31,715 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:49:31,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:49:31,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:49:31,722 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store 2024-11-18T18:49:31,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:49:31,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:49:31,730 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:49:31,730 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
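The repeated Close-WAL-Writer-0 warnings earlier in this log report "java.lang.reflect.InvocationTargetException: null", with the real failure ("Filesystem closed") visible only under "Caused by". That is ordinary java.lang.reflect behaviour: RecoverLeaseFSUtils invokes isFileClosed reflectively, and Method.invoke wraps whatever the target method throws in an InvocationTargetException whose own message is null. A self-contained illustration of the wrapping (the stand-in method below is invented for the example and is not HBase code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapSketch {
  // Stand-in for a filesystem call that fails because the client was already closed.
  public static boolean isFileClosed(String path) throws java.io.IOException {
    throw new java.io.IOException("Filesystem closed");
  }

  public static void main(String[] args) throws Exception {
    Method m = ReflectionWrapSketch.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(null, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // e.getMessage() is null, which is why the WARN line reads "InvocationTargetException: null";
      // the underlying IOException is only reachable through getCause().
      System.out.println("wrapper message: " + e.getMessage());
      System.out.println("real cause: " + e.getCause());
    }
  }
}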
2024-11-18T18:49:31,730 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955771730Disabling compacts and flushes for region at 1731955771730Disabling writes for close at 1731955771730Writing region close event to WAL at 1731955771730Closed at 1731955771730 2024-11-18T18:49:31,731 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/.initializing 2024-11-18T18:49:31,731 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/WALs/39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,733 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C38439%2C1731955771403, suffix=, logDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/WALs/39fff3b0f89c,38439,1731955771403, archiveDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/oldWALs, maxLogs=10 2024-11-18T18:49:31,734 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38439%2C1731955771403.1731955771733 2024-11-18T18:49:31,738 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/WALs/39fff3b0f89c,38439,1731955771403/39fff3b0f89c%2C38439%2C1731955771403.1731955771733 2024-11-18T18:49:31,739 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45361:45361),(127.0.0.1/127.0.0.1:45575:45575)] 2024-11-18T18:49:31,740 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:49:31,740 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:31,740 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,741 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:49:31,745 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:31,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:49:31,746 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:31,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:49:31,748 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:31,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,749 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:49:31,749 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:31,750 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,750 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,750 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,752 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,752 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,752 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:49:31,753 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:49:31,759 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:49:31,759 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774406, jitterRate=-0.015292227268218994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:49:31,760 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955771741Initializing all the Stores at 1731955771741Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955771741Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955771743 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955771743Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955771743Cleaning up temporary data from old regions at 1731955771752 (+9 ms)Region opened successfully at 1731955771760 (+8 ms) 2024-11-18T18:49:31,760 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:49:31,763 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56699f9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:49:31,764 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:49:31,764 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:49:31,764 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:49:31,764 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:49:31,765 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:49:31,765 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:49:31,765 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:49:31,770 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:49:31,771 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:49:31,778 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:49:31,779 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:49:31,779 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:49:31,787 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:49:31,787 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:49:31,788 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:49:31,795 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:49:31,796 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:49:31,803 DEBUG 
[master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:49:31,805 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:49:31,815 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:49:31,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:49:31,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:49:31,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,824 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,38439,1731955771403, sessionid=0x101509179df0000, setting cluster-up flag (Was=false) 2024-11-18T18:49:31,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,865 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:49:31,866 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:31,907 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:49:31,908 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:31,910 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:49:31,911 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:49:31,911 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:49:31,911 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T18:49:31,911 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,38439,1731955771403 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:49:31,913 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T18:49:31,915 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955801915 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:49:31,916 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:49:31,916 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:49:31,916 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:49:31,917 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:49:31,917 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:49:31,917 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:49:31,917 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,917 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955771917,5,FailOnTimeoutGroup] 2024-11-18T18:49:31,917 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:49:31,918 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955771917,5,FailOnTimeoutGroup] 2024-11-18T18:49:31,918 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,918 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:49:31,918 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,918 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:49:31,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:49:31,925 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:49:31,925 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864 2024-11-18T18:49:31,951 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(746): ClusterId : 84ea109b-6f1c-45cd-9e14-97d978493b61 2024-11-18T18:49:31,951 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:49:31,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:49:31,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:49:31,953 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:31,954 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:49:31,955 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:49:31,955 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:31,955 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:49:31,956 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:49:31,956 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:31,957 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:49:31,958 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:49:31,958 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:49:31,958 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:49:31,958 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:31,959 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:49:31,959 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:49:31,960 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:31,960 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:31,960 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:49:31,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740 2024-11-18T18:49:31,961 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740 2024-11-18T18:49:31,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:49:31,962 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:49:31,962 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:49:31,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:49:31,965 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:49:31,965 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699943, jitterRate=-0.10997684299945831}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955771953Initializing all the Stores at 1731955771954 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955771954Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955771954Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955771954Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955771954Cleaning up temporary data from old regions at 1731955771962 (+8 ms)Region opened successfully at 1731955771966 (+4 ms) 2024-11-18T18:49:31,966 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:49:31,966 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:49:31,966 DEBUG [RS:0;39fff3b0f89c:38211 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28134566, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:49:31,966 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:49:31,966 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955771966Disabling compacts and flushes for region at 1731955771966Disabling writes for close at 1731955771966Writing region close event to WAL at 1731955771966Closed at 1731955771966 2024-11-18T18:49:31,968 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:49:31,968 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:49:31,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:49:31,970 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:49:31,971 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:49:31,979 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:38211 2024-11-18T18:49:31,979 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:49:31,979 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:49:31,979 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T18:49:31,979 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,38439,1731955771403 with port=38211, startcode=1731955771609 2024-11-18T18:49:31,980 DEBUG [RS:0;39fff3b0f89c:38211 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:49:31,981 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57575, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:49:31,982 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:31,982 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:31,983 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864 2024-11-18T18:49:31,983 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38889 2024-11-18T18:49:31,983 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:49:31,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:49:31,991 DEBUG [RS:0;39fff3b0f89c:38211 {}] zookeeper.ZKUtil(111): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:31,991 WARN [RS:0;39fff3b0f89c:38211 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:49:31,991 INFO [RS:0;39fff3b0f89c:38211 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:49:31,991 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:31,991 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,38211,1731955771609] 2024-11-18T18:49:31,994 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:49:31,995 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:49:31,996 INFO [RS:0;39fff3b0f89c:38211 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:49:31,996 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T18:49:31,996 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:49:31,997 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:49:31,997 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:49:31,997 DEBUG [RS:0;39fff3b0f89c:38211 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:31,998 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38211,1731955771609-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:49:32,011 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:49:32,011 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38211,1731955771609-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,011 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,011 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.Replication(171): 39fff3b0f89c,38211,1731955771609 started 2024-11-18T18:49:32,022 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,022 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,38211,1731955771609, RpcServer on 39fff3b0f89c/172.17.0.2:38211, sessionid=0x101509179df0001 2024-11-18T18:49:32,022 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:49:32,022 DEBUG [RS:0;39fff3b0f89c:38211 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,022 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,38211,1731955771609' 2024-11-18T18:49:32,022 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,38211,1731955771609' 2024-11-18T18:49:32,023 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:49:32,024 DEBUG 
[RS:0;39fff3b0f89c:38211 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:49:32,024 DEBUG [RS:0;39fff3b0f89c:38211 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:49:32,024 INFO [RS:0;39fff3b0f89c:38211 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:49:32,024 INFO [RS:0;39fff3b0f89c:38211 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:49:32,121 WARN [39fff3b0f89c:38439 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:49:32,126 INFO [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C38211%2C1731955771609, suffix=, logDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609, archiveDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs, maxLogs=32 2024-11-18T18:49:32,126 INFO [RS:0;39fff3b0f89c:38211 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38211%2C1731955771609.1731955772126 2024-11-18T18:49:32,133 INFO [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955772126 2024-11-18T18:49:32,135 DEBUG [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45361:45361),(127.0.0.1/127.0.0.1:45575:45575)] 2024-11-18T18:49:32,371 DEBUG [39fff3b0f89c:38439 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:49:32,372 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,373 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,38211,1731955771609, state=OPENING 2024-11-18T18:49:32,387 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:49:32,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:32,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:49:32,396 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:49:32,396 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:49:32,396 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:49:32,396 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,38211,1731955771609}] 2024-11-18T18:49:32,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-18T18:49:32,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-18T18:49:32,549 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:49:32,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51583, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:49:32,555 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:49:32,555 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:49:32,557 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C38211%2C1731955771609.meta, suffix=.meta, logDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609, archiveDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs, maxLogs=32 2024-11-18T18:49:32,558 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38211%2C1731955771609.meta.1731955772558.meta 2024-11-18T18:49:32,568 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.meta.1731955772558.meta 2024-11-18T18:49:32,572 DEBUG
[RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45575:45575),(127.0.0.1/127.0.0.1:45361:45361)] 2024-11-18T18:49:32,575 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:49:32,575 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:49:32,575 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:49:32,575 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T18:49:32,575 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:49:32,575 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:32,576 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:49:32,576 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:49:32,579 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:49:32,580 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:49:32,581 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:32,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:32,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:49:32,582 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:49:32,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:32,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:32,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:49:32,584 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:49:32,584 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:32,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:32,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:49:32,585 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:49:32,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:32,586 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:49:32,586 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:49:32,587 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740 2024-11-18T18:49:32,588 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740 2024-11-18T18:49:32,590 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:49:32,590 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:49:32,590 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T18:49:32,592 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:49:32,593 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694350, jitterRate=-0.11708861589431763}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:49:32,593 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:49:32,594 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955772576Writing region info on filesystem at 1731955772576Initializing all the Stores at 1731955772577 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955772577Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955772579 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955772579Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955772579Cleaning up temporary data from old regions at 1731955772590 (+11 ms)Running coprocessor post-open hooks at 1731955772593 (+3 ms)Region opened successfully at 1731955772594 (+1 ms) 2024-11-18T18:49:32,595 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955772549 2024-11-18T18:49:32,598 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:49:32,598 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:49:32,599 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,600 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,38211,1731955771609, state=OPEN 2024-11-18T18:49:32,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:49:32,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:49:32,635 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:49:32,635 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:49:32,635 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:49:32,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,38211,1731955771609 in 239 msec 2024-11-18T18:49:32,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:49:32,641 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 670 msec 2024-11-18T18:49:32,642 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:49:32,642 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:49:32,643 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:49:32,643 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,38211,1731955771609, seqNum=-1] 2024-11-18T18:49:32,643 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:49:32,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49149, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:49:32,650 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 739 msec 2024-11-18T18:49:32,651 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955772651, completionTime=-1 2024-11-18T18:49:32,651 INFO 
[master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:49:32,651 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:49:32,653 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:49:32,653 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955832653 2024-11-18T18:49:32,653 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955892653 2024-11-18T18:49:32,653 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:38439, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,654 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,656 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.984sec 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:49:32,658 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:49:32,659 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:49:32,661 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:49:32,661 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:49:32,661 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,38439,1731955771403-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:49:32,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b69aa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:49:32,751 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,38439,-1 for getting cluster id 2024-11-18T18:49:32,752 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:49:32,754 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '84ea109b-6f1c-45cd-9e14-97d978493b61' 2024-11-18T18:49:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:49:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "84ea109b-6f1c-45cd-9e14-97d978493b61" 2024-11-18T18:49:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7663cf80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:49:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,38439,-1] 2024-11-18T18:49:32,755 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:49:32,755 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:49:32,756 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:49:32,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95994a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:49:32,758 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:49:32,759 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,38211,1731955771609, seqNum=-1] 2024-11-18T18:49:32,760 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:49:32,761 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58364, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:49:32,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:32,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:49:32,767 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:49:32,767 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T18:49:32,769 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 39fff3b0f89c,38439,1731955771403 2024-11-18T18:49:32,769 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67eda5ff 2024-11-18T18:49:32,769 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T18:49:32,771 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59536, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T18:49:32,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T18:49:32,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T18:49:32,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:49:32,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-18T18:49:32,776 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T18:49:32,776 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:32,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-18T18:49:32,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T18:49:32,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T18:49:32,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741835_1011 (size=381) 2024-11-18T18:49:32,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741835_1011 (size=381) 2024-11-18T18:49:32,796 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 59f804024a95bdb9d0c6e5330de69db8, NAME => 'TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864 2024-11-18T18:49:32,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741836_1012 (size=64) 2024-11-18T18:49:32,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741836_1012 (size=64) 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 59f804024a95bdb9d0c6e5330de69db8, disabling compactions & flushes 2024-11-18T18:49:32,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. after waiting 0 ms 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:32,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:32,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 59f804024a95bdb9d0c6e5330de69db8: Waiting for close lock at 1731955772810Disabling compacts and flushes for region at 1731955772810Disabling writes for close at 1731955772810Writing region close event to WAL at 1731955772810Closed at 1731955772810 2024-11-18T18:49:32,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T18:49:32,812 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731955772812"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955772812"}]},"ts":"1731955772812"} 2024-11-18T18:49:32,815 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T18:49:32,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T18:49:32,817 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955772816"}]},"ts":"1731955772816"} 2024-11-18T18:49:32,821 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-18T18:49:32,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, ASSIGN}] 2024-11-18T18:49:32,824 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, ASSIGN 2024-11-18T18:49:32,825 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, ASSIGN; state=OFFLINE, location=39fff3b0f89c,38211,1731955771609; forceNewPlan=false, retain=false 2024-11-18T18:49:32,976 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=59f804024a95bdb9d0c6e5330de69db8, regionState=OPENING, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:32,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, ASSIGN because future has completed 2024-11-18T18:49:32,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609}] 2024-11-18T18:49:33,136 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 
2024-11-18T18:49:33,136 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 59f804024a95bdb9d0c6e5330de69db8, NAME => 'TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:49:33,137 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,137 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:33,137 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,137 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,139 INFO [StoreOpener-59f804024a95bdb9d0c6e5330de69db8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,140 INFO [StoreOpener-59f804024a95bdb9d0c6e5330de69db8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59f804024a95bdb9d0c6e5330de69db8 columnFamilyName info 2024-11-18T18:49:33,140 DEBUG [StoreOpener-59f804024a95bdb9d0c6e5330de69db8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:33,141 INFO [StoreOpener-59f804024a95bdb9d0c6e5330de69db8-1 {}] regionserver.HStore(327): Store=59f804024a95bdb9d0c6e5330de69db8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:33,141 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,142 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,142 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,143 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,143 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,145 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,148 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:49:33,148 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 59f804024a95bdb9d0c6e5330de69db8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=827908, jitterRate=0.05274061858654022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:49:33,148 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:33,149 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 59f804024a95bdb9d0c6e5330de69db8: Running coprocessor pre-open hook at 1731955773137Writing region info on filesystem at 1731955773137Initializing all the Stores at 1731955773138 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955773138Cleaning up temporary data from old regions at 1731955773143 (+5 ms)Running coprocessor post-open hooks at 1731955773148 (+5 ms)Region opened successfully at 1731955773149 (+1 ms) 2024-11-18T18:49:33,150 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., pid=6, masterSystemTime=1731955773132 2024-11-18T18:49:33,155 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 
2024-11-18T18:49:33,155 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:33,155 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=59f804024a95bdb9d0c6e5330de69db8, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:33,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609 because future has completed 2024-11-18T18:49:33,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T18:49:33,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609 in 184 msec 2024-11-18T18:49:33,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T18:49:33,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, ASSIGN in 344 msec 2024-11-18T18:49:33,171 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T18:49:33,172 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731955773172"}]},"ts":"1731955773172"} 2024-11-18T18:49:33,175 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-18T18:49:33,177 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T18:49:33,179 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 405 msec 2024-11-18T18:49:33,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:33,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:33,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:33,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,458 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:49:34,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-18T18:49:34,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:34,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,484 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,485 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:34,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:35,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:35,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:36,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:36,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:37,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:37,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:37,994 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T18:49:37,995 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-18T18:49:38,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:38,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:39,034 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T18:49:39,034 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T18:49:39,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:49:39,035 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T18:49:39,035 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T18:49:39,035 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T18:49:39,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:39,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:40,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:40,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:41,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:41,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:42,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:42,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T18:49:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38439 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-18T18:49:42,860 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-18T18:49:42,860 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-18T18:49:42,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-18T18:49:42,868 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.
2024-11-18T18:49:42,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., hostname=39fff3b0f89c,38211,1731955771609, seqNum=2]
2024-11-18T18:49:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8
2024-11-18T18:49:42,886 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-18T18:49:42,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/aae8e234a30645a6b2ed79186e28cb4f is 1080, key is row0001/info:/1731955782874/Put/seqid=0
2024-11-18T18:49:42,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741837_1013 (size=12509)
2024-11-18T18:49:42,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741837_1013 (size=12509)
2024-11-18T18:49:42,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/aae8e234a30645a6b2ed79186e28cb4f
2024-11-18T18:49:42,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/aae8e234a30645a6b2ed79186e28cb4f as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f
2024-11-18T18:49:42,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f, entries=7, sequenceid=11, filesize=12.2 K
2024-11-18T18:49:42,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 59f804024a95bdb9d0c6e5330de69db8 in 42ms, sequenceid=11, compaction requested=false
2024-11-18T18:49:42,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8:
2024-11-18T18:49:42,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8
2024-11-18T18:49:42,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB
2024-11-18T18:49:42,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/cfc299a024e943c38393b1f76966f3ff is 1080, key is row0008/info:/1731955782887/Put/seqid=0
2024-11-18T18:49:42,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741838_1014 (size=27607)
2024-11-18T18:49:42,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741838_1014 (size=27607)
2024-11-18T18:49:42,940 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/cfc299a024e943c38393b1f76966f3ff
2024-11-18T18:49:42,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/cfc299a024e943c38393b1f76966f3ff as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff
2024-11-18T18:49:42,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff, entries=21, sequenceid=35, filesize=27.0 K
2024-11-18T18:49:42,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for 59f804024a95bdb9d0c6e5330de69db8 in 19ms, sequenceid=35, compaction requested=false
2024-11-18T18:49:42,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8:
2024-11-18T18:49:42,950 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K
2024-11-18T18:49:42,950 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-18T18:49:42,950 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff because midkey is the same as first or last row 2024-11-18T18:49:43,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:43,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:44,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:44,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T18:49:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8
2024-11-18T18:49:44,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-18T18:49:44,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/eda1d84650f54fd7a9340346d9c60030 is 1080, key is row0029/info:/1731955782931/Put/seqid=0
2024-11-18T18:49:44,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741839_1015 (size=12509)
2024-11-18T18:49:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741839_1015 (size=12509)
2024-11-18T18:49:44,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/eda1d84650f54fd7a9340346d9c60030
2024-11-18T18:49:44,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/eda1d84650f54fd7a9340346d9c60030 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030
2024-11-18T18:49:44,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030, entries=7, sequenceid=45, filesize=12.2 K
2024-11-18T18:49:44,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 59f804024a95bdb9d0c6e5330de69db8 in 27ms, sequenceid=45, compaction requested=true
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8:
2024-11-18T18:49:44,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff because midkey is the same as first or last row
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59f804024a95bdb9d0c6e5330de69db8:info, priority=-2147483648, current under compaction store size is 1
2024-11-18T18:49:44,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T18:49:44,979 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T18:49:44,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-18T18:49:44,980 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T18:49:44,981 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): 59f804024a95bdb9d0c6e5330de69db8/info is initiating minor compaction (all files)
2024-11-18T18:49:44,981 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59f804024a95bdb9d0c6e5330de69db8/info in TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.
2024-11-18T18:49:44,981 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp, totalSize=51.4 K
2024-11-18T18:49:44,981 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting aae8e234a30645a6b2ed79186e28cb4f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731955782874
2024-11-18T18:49:44,981 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting cfc299a024e943c38393b1f76966f3ff, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1731955782887
2024-11-18T18:49:44,982 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting eda1d84650f54fd7a9340346d9c60030, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731955782931
2024-11-18T18:49:44,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/dcdb78a6a7554bc1abde962e08d16afe is 1080, key is row0036/info:/1731955784952/Put/seqid=0
2024-11-18T18:49:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741840_1016 (size=16817) 2024-11-18T18:49:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741840_1016 (size=16817) 2024-11-18T18:49:44,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/dcdb78a6a7554bc1abde962e08d16afe 2024-11-18T18:49:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/dcdb78a6a7554bc1abde962e08d16afe as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe 2024-11-18T18:49:45,000 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59f804024a95bdb9d0c6e5330de69db8#info#compaction#59 average throughput is 11.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:49:45,000 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/a81ce3655f2d4dadb738a8755162f976 is 1080, key is row0001/info:/1731955782874/Put/seqid=0 2024-11-18T18:49:45,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe, entries=11, sequenceid=59, filesize=16.4 K 2024-11-18T18:49:45,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 59f804024a95bdb9d0c6e5330de69db8 in 26ms, sequenceid=59, compaction requested=false 2024-11-18T18:49:45,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:45,005 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=67.8 K, sizeToCheck=16.0 K 2024-11-18T18:49:45,005 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:45,005 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff because midkey is the same as first or last row 2024-11-18T18:49:45,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:45,007 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T18:49:45,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/8bc16eb045b746d1bd5e88e57584b9b5 is 1080, key is row0047/info:/1731955784980/Put/seqid=0 2024-11-18T18:49:45,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741841_1017 (size=42824) 2024-11-18T18:49:45,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741841_1017 (size=42824) 2024-11-18T18:49:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741842_1018 (size=16817) 2024-11-18T18:49:45,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741842_1018 (size=16817) 2024-11-18T18:49:45,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/8bc16eb045b746d1bd5e88e57584b9b5 2024-11-18T18:49:45,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/8bc16eb045b746d1bd5e88e57584b9b5 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5 2024-11-18T18:49:45,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5, entries=11, sequenceid=73, filesize=16.4 K 2024-11-18T18:49:45,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=7.36 KB/7532 for 59f804024a95bdb9d0c6e5330de69db8 in 22ms, sequenceid=73, compaction requested=false 2024-11-18T18:49:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=84.2 K, sizeToCheck=16.0 K 2024-11-18T18:49:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:45,029 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff because midkey is the same as first or last row 2024-11-18T18:49:45,424 DEBUG 
[RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/a81ce3655f2d4dadb738a8755162f976 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 2024-11-18T18:49:45,430 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59f804024a95bdb9d0c6e5330de69db8/info of 59f804024a95bdb9d0c6e5330de69db8 into a81ce3655f2d4dadb738a8755162f976(size=41.8 K), total size for store is 74.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:45,430 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., storeName=59f804024a95bdb9d0c6e5330de69db8/info, priority=13, startTime=1731955784979; duration=0sec 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 because midkey is the same as first or last row 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 because midkey is the same as first or last row 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 because midkey is the 
same as first or last row 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:45,430 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59f804024a95bdb9d0c6e5330de69db8:info 2024-11-18T18:49:45,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:45,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:46,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:46,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T18:49:47,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/54e4d4d172d54c54ac6887b668dd81d6 is 1080, key is row0058/info:/1731955785008/Put/seqid=0 2024-11-18T18:49:47,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741843_1019 (size=13586) 2024-11-18T18:49:47,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741843_1019 (size=13586) 2024-11-18T18:49:47,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/54e4d4d172d54c54ac6887b668dd81d6 2024-11-18T18:49:47,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/54e4d4d172d54c54ac6887b668dd81d6 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6 2024-11-18T18:49:47,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6, entries=8, sequenceid=85, filesize=13.3 K 2024-11-18T18:49:47,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 59f804024a95bdb9d0c6e5330de69db8 in 28ms, sequenceid=85, compaction requested=true 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=87.9 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 because midkey is the same as first or last row 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59f804024a95bdb9d0c6e5330de69db8:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-18T18:49:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,055 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-18T18:49:47,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,057 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 90044 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-18T18:49:47,057 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): 59f804024a95bdb9d0c6e5330de69db8/info is initiating minor compaction (all files) 2024-11-18T18:49:47,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T18:49:47,057 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59f804024a95bdb9d0c6e5330de69db8/info in TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:47,057 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp, totalSize=87.9 K 2024-11-18T18:49:47,058 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting a81ce3655f2d4dadb738a8755162f976, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731955782874 2024-11-18T18:49:47,058 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting dcdb78a6a7554bc1abde962e08d16afe, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1731955784952 2024-11-18T18:49:47,058 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8bc16eb045b746d1bd5e88e57584b9b5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1731955784980 2024-11-18T18:49:47,059 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54e4d4d172d54c54ac6887b668dd81d6, keycount=8, 
bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731955785008 2024-11-18T18:49:47,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/00baa93c143c418c86732d69d2924f83 is 1080, key is row0066/info:/1731955787029/Put/seqid=0 2024-11-18T18:49:47,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741844_1020 (size=17894) 2024-11-18T18:49:47,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741844_1020 (size=17894) 2024-11-18T18:49:47,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/00baa93c143c418c86732d69d2924f83 2024-11-18T18:49:47,075 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59f804024a95bdb9d0c6e5330de69db8#info#compaction#63 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:49:47,076 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/6b80dc9f67fd4d0ebb663605cd32e893 is 1080, key is row0001/info:/1731955782874/Put/seqid=0 2024-11-18T18:49:47,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/00baa93c143c418c86732d69d2924f83 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/00baa93c143c418c86732d69d2924f83 2024-11-18T18:49:47,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741845_1021 (size=75412) 2024-11-18T18:49:47,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741845_1021 (size=75412) 2024-11-18T18:49:47,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/00baa93c143c418c86732d69d2924f83, entries=12, sequenceid=100, filesize=17.5 K 2024-11-18T18:49:47,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 59f804024a95bdb9d0c6e5330de69db8 in 26ms, sequenceid=100, compaction requested=false 2024-11-18T18:49:47,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:47,084 DEBUG 
[MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.4 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,084 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,084 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 because midkey is the same as first or last row 2024-11-18T18:49:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T18:49:47,088 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/6b80dc9f67fd4d0ebb663605cd32e893 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893 2024-11-18T18:49:47,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/6fc9006b9fd84bab9703c89072eae3a6 is 1080, key is row0078/info:/1731955787058/Put/seqid=0 2024-11-18T18:49:47,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741846_1022 (size=16817) 2024-11-18T18:49:47,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741846_1022 (size=16817) 2024-11-18T18:49:47,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/6fc9006b9fd84bab9703c89072eae3a6 2024-11-18T18:49:47,096 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 59f804024a95bdb9d0c6e5330de69db8/info of 59f804024a95bdb9d0c6e5330de69db8 into 6b80dc9f67fd4d0ebb663605cd32e893(size=73.6 K), total size for store is 91.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
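[Editor's note] The ConstantSizeRegionSplitPolicy / IncreasingToUpperBoundRegionSplitPolicy / StoreUtils entries above show the split check that runs after each flush and compaction: the summed store-file size (sumSize) is compared against a check size (here 16.0 K, evidently lowered for this test), and even when the region is big enough the split is refused while the candidate midkey equals the region's first or last row. A minimal standalone sketch of that decision logic follows; it is an illustration with hypothetical row values, not the HBase implementation.

    // Illustrative sketch only -- not the HBase split-policy code.
    // Mirrors the decision visible in the log lines above.
    final class SplitCheckSketch {
        static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                                   byte[] midKey, byte[] firstRow, byte[] lastRow) {
            if (sumSizeBytes <= sizeToCheckBytes) {
                return false; // region not big enough yet
            }
            if (java.util.Arrays.equals(midKey, firstRow)
                    || java.util.Arrays.equals(midKey, lastRow)) {
                return false; // "cannot split ... because midkey is the same as first or last row"
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes taken from the log above; row values are example placeholders.
            long sumSize = 105L * 1024 + 410;   // ~105.4 K total store size
            long sizeToCheck = 16L * 1024;      // 16.0 K check size
            byte[] first = "row0001".getBytes(java.nio.charset.StandardCharsets.UTF_8);
            byte[] last  = "row0089".getBytes(java.nio.charset.StandardCharsets.UTF_8);
            byte[] mid   = "row0062".getBytes(java.nio.charset.StandardCharsets.UTF_8);
            System.out.println(shouldSplit(sumSize, sizeToCheck, mid, first, last)); // true
        }
    }

Once the midkey (row0062 here) no longer coincides with the first or last row, the region server files the split request that appears a few entries later.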
2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:47,096 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., storeName=59f804024a95bdb9d0c6e5330de69db8/info, priority=12, startTime=1731955787055; duration=0sec 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,096 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,098 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,098 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,098 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59f804024a95bdb9d0c6e5330de69db8:info 2024-11-18T18:49:47,099 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] assignment.AssignmentManager(1363): Split request from 39fff3b0f89c,38211,1731955771609, parent={ENCODED => 59f804024a95bdb9d0c6e5330de69db8, NAME => 'TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T18:49:47,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/6fc9006b9fd84bab9703c89072eae3a6 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6fc9006b9fd84bab9703c89072eae3a6 2024-11-18T18:49:47,105 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6fc9006b9fd84bab9703c89072eae3a6, entries=11, sequenceid=114, filesize=16.4 K 2024-11-18T18:49:47,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 59f804024a95bdb9d0c6e5330de69db8 in 24ms, sequenceid=114, compaction requested=true 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59f804024a95bdb9d0c6e5330de69db8: 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.5 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,108 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=59f804024a95bdb9d0c6e5330de69db8, daughterA=3f30926e86a799df41ae2228b3086b48, daughterB=f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.5 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.5 K, sizeToCheck=16.0 K 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T18:49:47,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-18T18:49:47,110 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=59f804024a95bdb9d0c6e5330de69db8, daughterA=3f30926e86a799df41ae2228b3086b48, daughterB=f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,110 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=59f804024a95bdb9d0c6e5330de69db8, daughterA=3f30926e86a799df41ae2228b3086b48, daughterB=f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,110 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=59f804024a95bdb9d0c6e5330de69db8, daughterA=3f30926e86a799df41ae2228b3086b48, daughterB=f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,111 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] assignment.AssignmentManager(1363): Split request from 39fff3b0f89c,38211,1731955771609, parent={ENCODED => 
59f804024a95bdb9d0c6e5330de69db8, NAME => 'TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T18:49:47,111 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38439 {}] assignment.AssignmentManager(1378): Ignoring split request from 39fff3b0f89c,38211,1731955771609, parent={ENCODED => 59f804024a95bdb9d0c6e5330de69db8, NAME => 'TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-18T18:49:47,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, UNASSIGN}] 2024-11-18T18:49:47,118 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, UNASSIGN 2024-11-18T18:49:47,119 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=59f804024a95bdb9d0c6e5330de69db8, regionState=CLOSING, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, UNASSIGN because future has completed 2024-11-18T18:49:47,122 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-18T18:49:47,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609}] 2024-11-18T18:49:47,281 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,282 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-18T18:49:47,282 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 59f804024a95bdb9d0c6e5330de69db8, disabling compactions & flushes 2024-11-18T18:49:47,283 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:47,283 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:47,283 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 
after waiting 0 ms 2024-11-18T18:49:47,283 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:47,283 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 59f804024a95bdb9d0c6e5330de69db8 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T18:49:47,291 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/a088334c04fd4b06a1f4fcaa16f9005e is 1080, key is row0089/info:/1731955787087/Put/seqid=0 2024-11-18T18:49:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741847_1023 (size=13586) 2024-11-18T18:49:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741847_1023 (size=13586) 2024-11-18T18:49:47,297 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/a088334c04fd4b06a1f4fcaa16f9005e 2024-11-18T18:49:47,303 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/.tmp/info/a088334c04fd4b06a1f4fcaa16f9005e as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a088334c04fd4b06a1f4fcaa16f9005e 2024-11-18T18:49:47,307 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a088334c04fd4b06a1f4fcaa16f9005e, entries=8, sequenceid=126, filesize=13.3 K 2024-11-18T18:49:47,308 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 59f804024a95bdb9d0c6e5330de69db8 in 25ms, sequenceid=126, compaction requested=true 2024-11-18T18:49:47,309 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f, 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6] to archive 2024-11-18T18:49:47,310 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T18:49:47,312 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/aae8e234a30645a6b2ed79186e28cb4f 2024-11-18T18:49:47,313 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/cfc299a024e943c38393b1f76966f3ff 2024-11-18T18:49:47,314 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a81ce3655f2d4dadb738a8755162f976 2024-11-18T18:49:47,315 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030 to 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/eda1d84650f54fd7a9340346d9c60030 2024-11-18T18:49:47,316 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/dcdb78a6a7554bc1abde962e08d16afe 2024-11-18T18:49:47,317 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/8bc16eb045b746d1bd5e88e57584b9b5 2024-11-18T18:49:47,318 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/54e4d4d172d54c54ac6887b668dd81d6 2024-11-18T18:49:47,324 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=1 2024-11-18T18:49:47,324 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 2024-11-18T18:49:47,324 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 59f804024a95bdb9d0c6e5330de69db8: Waiting for close lock at 1731955787282Running coprocessor pre-close hooks at 1731955787282Disabling compacts and flushes for region at 1731955787282Disabling writes for close at 1731955787283 (+1 ms)Obtaining lock to block concurrent updates at 1731955787283Preparing flush snapshotting stores in 59f804024a95bdb9d0c6e5330de69db8 at 1731955787283Finished memstore snapshotting TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731955787283Flushing stores of TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 
at 1731955787285 (+2 ms)Flushing 59f804024a95bdb9d0c6e5330de69db8/info: creating writer at 1731955787285Flushing 59f804024a95bdb9d0c6e5330de69db8/info: appending metadata at 1731955787290 (+5 ms)Flushing 59f804024a95bdb9d0c6e5330de69db8/info: closing flushed file at 1731955787291 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@524d3661: reopening flushed file at 1731955787302 (+11 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 59f804024a95bdb9d0c6e5330de69db8 in 25ms, sequenceid=126, compaction requested=true at 1731955787308 (+6 ms)Writing region close event to WAL at 1731955787320 (+12 ms)Running coprocessor post-close hooks at 1731955787324 (+4 ms)Closed at 1731955787324 2024-11-18T18:49:47,326 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,327 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=59f804024a95bdb9d0c6e5330de69db8, regionState=CLOSED 2024-11-18T18:49:47,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609 because future has completed 2024-11-18T18:49:47,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-18T18:49:47,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 59f804024a95bdb9d0c6e5330de69db8, server=39fff3b0f89c,38211,1731955771609 in 208 msec 2024-11-18T18:49:47,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T18:49:47,333 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59f804024a95bdb9d0c6e5330de69db8, UNASSIGN in 215 msec 2024-11-18T18:49:47,339 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:47,343 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=59f804024a95bdb9d0c6e5330de69db8, threads=4 2024-11-18T18:49:47,346 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,346 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a088334c04fd4b06a1f4fcaa16f9005e for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,346 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/00baa93c143c418c86732d69d2924f83 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,346 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6fc9006b9fd84bab9703c89072eae3a6 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,356 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6fc9006b9fd84bab9703c89072eae3a6, top=true 2024-11-18T18:49:47,356 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a088334c04fd4b06a1f4fcaa16f9005e, top=true 2024-11-18T18:49:47,356 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/00baa93c143c418c86732d69d2924f83, top=true 2024-11-18T18:49:47,364 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83 for child: f10d5787b55aa38b294e59af360a1bb7, parent: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,364 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e for child: f10d5787b55aa38b294e59af360a1bb7, parent: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741848_1024 (size=27) 2024-11-18T18:49:47,364 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/00baa93c143c418c86732d69d2924f83 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,364 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/a088334c04fd4b06a1f4fcaa16f9005e for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,365 INFO [StoreFileSplitter-pool-2 {}] 
regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6 for child: f10d5787b55aa38b294e59af360a1bb7, parent: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,365 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6fc9006b9fd84bab9703c89072eae3a6 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741848_1024 (size=27) 2024-11-18T18:49:47,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741849_1025 (size=27) 2024-11-18T18:49:47,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741849_1025 (size=27) 2024-11-18T18:49:47,379 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893 for region: 59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:49:47,381 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 59f804024a95bdb9d0c6e5330de69db8 Daughter A: [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8] storefiles, Daughter B: [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e] storefiles. 
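[Editor's note] The StoreFileSplitter entries above show how the parent's store files are handed to the daughters without rewriting data: a file that lies entirely above the split key ("top=true") gets an HFileLink named <table>=<parentEncodedRegion>-<hfileName> in the daughter's store, while the file that straddles the split key gets a reference named <hfileName>.<parentEncodedRegion>. The following is a small sketch reproducing the two naming patterns visible in the log; it is string composition for clarity only, not the HBase API.

    // Illustration of the child file-name patterns seen in the split log above.
    final class SplitFileNamesSketch {
        // HFileLink-style name used when the whole file belongs to one daughter ("top=true").
        static String hfileLinkName(String table, String parentEncodedRegion, String hfileName) {
            return table + "=" + parentEncodedRegion + "-" + hfileName;
        }

        // Reference-style name used when the file straddles the split key.
        static String referenceName(String hfileName, String parentEncodedRegion) {
            return hfileName + "." + parentEncodedRegion;
        }

        public static void main(String[] args) {
            String table = "TestLogRolling-testLogRolling";
            String parent = "59f804024a95bdb9d0c6e5330de69db8";
            System.out.println(hfileLinkName(table, parent, "00baa93c143c418c86732d69d2924f83"));
            // -> TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83
            System.out.println(referenceName("6b80dc9f67fd4d0ebb663605cd32e893", parent));
            // -> 6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8
        }
    }

Daughter A (3f30926e86a799df41ae2228b3086b48) receives only the reference to the straddling file, while daughter B (f10d5787b55aa38b294e59af360a1bb7) receives that reference plus the three HFileLinks, matching the "Daughter A / Daughter B" summary in the next entries.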
2024-11-18T18:49:47,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741850_1026 (size=71) 2024-11-18T18:49:47,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741850_1026 (size=71) 2024-11-18T18:49:47,391 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:47,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741851_1027 (size=71) 2024-11-18T18:49:47,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741851_1027 (size=71) 2024-11-18T18:49:47,405 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:47,418 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-18T18:49:47,420 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-18T18:49:47,422 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731955787422"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731955787422"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731955787422"}]},"ts":"1731955787422"} 2024-11-18T18:49:47,423 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731955787422"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955787422"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731955787422"}]},"ts":"1731955787422"} 2024-11-18T18:49:47,423 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731955787422"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731955787422"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731955787422"}]},"ts":"1731955787422"} 2024-11-18T18:49:47,440 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f30926e86a799df41ae2228b3086b48, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=f10d5787b55aa38b294e59af360a1bb7, ASSIGN}] 2024-11-18T18:49:47,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f30926e86a799df41ae2228b3086b48, ASSIGN 2024-11-18T18:49:47,441 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f10d5787b55aa38b294e59af360a1bb7, ASSIGN 2024-11-18T18:49:47,442 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f30926e86a799df41ae2228b3086b48, ASSIGN; state=SPLITTING_NEW, location=39fff3b0f89c,38211,1731955771609; forceNewPlan=false, retain=false 2024-11-18T18:49:47,442 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f10d5787b55aa38b294e59af360a1bb7, ASSIGN; state=SPLITTING_NEW, location=39fff3b0f89c,38211,1731955771609; forceNewPlan=false, retain=false 2024-11-18T18:49:47,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:47,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:47,593 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f10d5787b55aa38b294e59af360a1bb7, regionState=OPENING, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,593 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3f30926e86a799df41ae2228b3086b48, regionState=OPENING, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f30926e86a799df41ae2228b3086b48, ASSIGN because future has completed 2024-11-18T18:49:47,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f30926e86a799df41ae2228b3086b48, server=39fff3b0f89c,38211,1731955771609}] 2024-11-18T18:49:47,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f10d5787b55aa38b294e59af360a1bb7, ASSIGN because future has completed 2024-11-18T18:49:47,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f10d5787b55aa38b294e59af360a1bb7, server=39fff3b0f89c,38211,1731955771609}] 2024-11-18T18:49:47,756 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 
2024-11-18T18:49:47,756 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 3f30926e86a799df41ae2228b3086b48, NAME => 'TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-18T18:49:47,757 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,757 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:47,757 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,757 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,760 INFO [StoreOpener-3f30926e86a799df41ae2228b3086b48-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,761 INFO [StoreOpener-3f30926e86a799df41ae2228b3086b48-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f30926e86a799df41ae2228b3086b48 columnFamilyName info 2024-11-18T18:49:47,762 DEBUG [StoreOpener-3f30926e86a799df41ae2228b3086b48-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:47,778 DEBUG [StoreOpener-3f30926e86a799df41ae2228b3086b48-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-bottom 2024-11-18T18:49:47,779 INFO [StoreOpener-3f30926e86a799df41ae2228b3086b48-1 {}] regionserver.HStore(327): Store=3f30926e86a799df41ae2228b3086b48/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:47,779 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,780 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,781 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,781 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,781 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,783 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,784 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 3f30926e86a799df41ae2228b3086b48; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762757, jitterRate=-0.03010457754135132}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:49:47,785 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:49:47,785 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 3f30926e86a799df41ae2228b3086b48: Running coprocessor pre-open hook at 1731955787758Writing region info on filesystem at 1731955787758Initializing all the Stores at 1731955787759 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955787759Cleaning up temporary data from old regions at 1731955787781 (+22 ms)Running coprocessor post-open hooks at 1731955787785 (+4 ms)Region opened successfully at 1731955787785 2024-11-18T18:49:47,786 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48., pid=12, masterSystemTime=1731955787748 2024-11-18T18:49:47,786 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
3f30926e86a799df41ae2228b3086b48:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:49:47,786 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,786 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-18T18:49:47,787 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:49:47,787 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): 3f30926e86a799df41ae2228b3086b48/info is initiating minor compaction (all files) 2024-11-18T18:49:47,787 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3f30926e86a799df41ae2228b3086b48/info in TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:49:47,787 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-bottom] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/.tmp, totalSize=73.6 K 2024-11-18T18:49:47,788 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731955782874 2024-11-18T18:49:47,788 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:49:47,788 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:49:47,788 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 
2024-11-18T18:49:47,789 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => f10d5787b55aa38b294e59af360a1bb7, NAME => 'TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-18T18:49:47,789 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,789 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:49:47,789 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,789 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,789 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=3f30926e86a799df41ae2228b3086b48, regionState=OPEN, openSeqNum=130, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,790 INFO [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,791 INFO [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f10d5787b55aa38b294e59af360a1bb7 columnFamilyName info 2024-11-18T18:49:47,791 DEBUG [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:49:47,791 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-18T18:49:47,791 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-18T18:49:47,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-18T18:49:47,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3f30926e86a799df41ae2228b3086b48, server=39fff3b0f89c,38211,1731955771609 because future has completed 2024-11-18T18:49:47,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-18T18:49:47,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 3f30926e86a799df41ae2228b3086b48, server=39fff3b0f89c,38211,1731955771609 in 198 msec 2024-11-18T18:49:47,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3f30926e86a799df41ae2228b3086b48, ASSIGN in 356 msec 2024-11-18T18:49:47,800 DEBUG [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-top 2024-11-18T18:49:47,806 DEBUG [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83 2024-11-18T18:49:47,808 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f30926e86a799df41ae2228b3086b48#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:49:47,809 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/.tmp/info/6d37148e762e4534ab70884b5b81d13f is 1080, key is row0001/info:/1731955782874/Put/seqid=0 2024-11-18T18:49:47,810 DEBUG [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6 2024-11-18T18:49:47,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/855fdfbf22c04a128132c2a028b26be7 is 193, key is TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7./info:regioninfo/1731955787593/Put/seqid=0 2024-11-18T18:49:47,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741852_1028 (size=70862) 2024-11-18T18:49:47,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741852_1028 (size=70862) 2024-11-18T18:49:47,815 DEBUG [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e 2024-11-18T18:49:47,815 INFO [StoreOpener-f10d5787b55aa38b294e59af360a1bb7-1 {}] regionserver.HStore(327): Store=f10d5787b55aa38b294e59af360a1bb7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:49:47,815 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741853_1029 (size=9847) 2024-11-18T18:49:47,816 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741853_1029 (size=9847) 2024-11-18T18:49:47,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/855fdfbf22c04a128132c2a028b26be7 2024-11-18T18:49:47,817 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 
{event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,817 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,817 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,819 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,820 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened f10d5787b55aa38b294e59af360a1bb7; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745716, jitterRate=-0.05177342891693115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T18:49:47,820 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:47,821 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for f10d5787b55aa38b294e59af360a1bb7: Running coprocessor pre-open hook at 1731955787789Writing region info on filesystem at 1731955787789Initializing all the Stores at 1731955787790 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955787790Cleaning up temporary data from old regions at 1731955787817 (+27 ms)Running coprocessor post-open hooks at 1731955787820 (+3 ms)Region opened successfully at 1731955787821 (+1 ms) 2024-11-18T18:49:47,821 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., pid=13, masterSystemTime=1731955787748 2024-11-18T18:49:47,822 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 2 2024-11-18T18:49:47,822 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,822 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-18T18:49:47,823 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] 
regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:49:47,823 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:49:47,823 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:49:47,823 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-top, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=120.8 K 2024-11-18T18:49:47,824 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] compactions.Compactor(225): Compacting 6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731955782874 2024-11-18T18:49:47,824 DEBUG [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:49:47,824 INFO [RS_OPEN_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 
2024-11-18T18:49:47,825 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731955787029 2024-11-18T18:49:47,825 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731955787058 2024-11-18T18:49:47,825 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=f10d5787b55aa38b294e59af360a1bb7, regionState=OPEN, openSeqNum=130, regionLocation=39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:47,825 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731955787087 2024-11-18T18:49:47,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure f10d5787b55aa38b294e59af360a1bb7, server=39fff3b0f89c,38211,1731955771609 because future has completed 2024-11-18T18:49:47,830 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-18T18:49:47,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure f10d5787b55aa38b294e59af360a1bb7, server=39fff3b0f89c,38211,1731955771609 in 232 msec 2024-11-18T18:49:47,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-18T18:49:47,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f10d5787b55aa38b294e59af360a1bb7, ASSIGN in 391 msec 2024-11-18T18:49:47,835 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=59f804024a95bdb9d0c6e5330de69db8, daughterA=3f30926e86a799df41ae2228b3086b48, daughterB=f10d5787b55aa38b294e59af360a1bb7 in 728 msec 2024-11-18T18:49:47,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/ns/b217c0ab062145939aa194b76e20a3fe is 43, key is default/ns:d/1731955772645/Put/seqid=0 2024-11-18T18:49:47,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741854_1030 (size=5153) 2024-11-18T18:49:47,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741854_1030 (size=5153) 2024-11-18T18:49:47,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/ns/b217c0ab062145939aa194b76e20a3fe 2024-11-18T18:49:47,851 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:49:47,852 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/9fe78915310641b996a974db9908909d is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:49:47,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741855_1031 (size=43081) 2024-11-18T18:49:47,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741855_1031 (size=43081) 2024-11-18T18:49:47,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/table/87dcbf7914124463897bdef7c778bf8e is 65, key is TestLogRolling-testLogRolling/table:state/1731955773172/Put/seqid=0 2024-11-18T18:49:47,866 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/9fe78915310641b996a974db9908909d as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/9fe78915310641b996a974db9908909d 2024-11-18T18:49:47,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741856_1032 (size=5340) 2024-11-18T18:49:47,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741856_1032 (size=5340) 2024-11-18T18:49:47,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/table/87dcbf7914124463897bdef7c778bf8e 2024-11-18T18:49:47,873 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 9fe78915310641b996a974db9908909d(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T18:49:47,873 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:49:47,873 INFO [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=12, startTime=1731955787821; duration=0sec 2024-11-18T18:49:47,873 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:47,873 DEBUG [RS:0;39fff3b0f89c:38211-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:49:47,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/855fdfbf22c04a128132c2a028b26be7 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/info/855fdfbf22c04a128132c2a028b26be7 2024-11-18T18:49:47,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/info/855fdfbf22c04a128132c2a028b26be7, entries=30, sequenceid=17, filesize=9.6 K 2024-11-18T18:49:47,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/ns/b217c0ab062145939aa194b76e20a3fe as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/ns/b217c0ab062145939aa194b76e20a3fe 2024-11-18T18:49:47,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/ns/b217c0ab062145939aa194b76e20a3fe, entries=2, sequenceid=17, filesize=5.0 K 2024-11-18T18:49:47,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/table/87dcbf7914124463897bdef7c778bf8e as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/table/87dcbf7914124463897bdef7c778bf8e 2024-11-18T18:49:47,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/table/87dcbf7914124463897bdef7c778bf8e, entries=2, sequenceid=17, filesize=5.2 K 2024-11-18T18:49:47,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 100ms, sequenceid=17, compaction requested=false 2024-11-18T18:49:47,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T18:49:48,225 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/.tmp/info/6d37148e762e4534ab70884b5b81d13f as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6d37148e762e4534ab70884b5b81d13f 2024-11-18T18:49:48,233 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 3f30926e86a799df41ae2228b3086b48/info of 3f30926e86a799df41ae2228b3086b48 into 6d37148e762e4534ab70884b5b81d13f(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:49:48,233 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3f30926e86a799df41ae2228b3086b48: 2024-11-18T18:49:48,233 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48., storeName=3f30926e86a799df41ae2228b3086b48/info, priority=15, startTime=1731955787786; duration=0sec 2024-11-18T18:49:48,233 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:48,233 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f30926e86a799df41ae2228b3086b48:info 2024-11-18T18:49:48,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:48,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:49,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:58364 deadline: 1731955799106, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. 
is not online on 39fff3b0f89c,38211,1731955771609 2024-11-18T18:49:49,136 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., hostname=39fff3b0f89c,38211,1731955771609, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., hostname=39fff3b0f89c,38211,1731955771609, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. is not online on 39fff3b0f89c,38211,1731955771609 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T18:49:49,137 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., hostname=39fff3b0f89c,38211,1731955771609, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8. is not online on 39fff3b0f89c,38211,1731955771609 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T18:49:49,137 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731955772771.59f804024a95bdb9d0c6e5330de69db8., hostname=39fff3b0f89c,38211,1731955771609, seqNum=2 from cache 2024-11-18T18:49:49,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:49,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:50,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:50,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:51,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:51,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:52,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,353 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:52,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:52,866 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T18:49:52,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:52,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T18:49:53,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:53,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:54,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:54,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:55,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:55,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:56,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:56,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:57,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:57,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:49:58,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:58,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:59,204 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., hostname=39fff3b0f89c,38211,1731955771609, seqNum=130] 2024-11-18T18:49:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:59,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:49:59,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/691ed8925c1c439da270184396fddb0f is 1080, key is row0097/info:/1731955799206/Put/seqid=0 2024-11-18T18:49:59,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741857_1033 (size=12516) 2024-11-18T18:49:59,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741857_1033 (size=12516) 2024-11-18T18:49:59,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/691ed8925c1c439da270184396fddb0f 2024-11-18T18:49:59,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/691ed8925c1c439da270184396fddb0f as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f 2024-11-18T18:49:59,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f, entries=7, sequenceid=140, filesize=12.2 K 2024-11-18T18:49:59,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for f10d5787b55aa38b294e59af360a1bb7 in 26ms, sequenceid=140, compaction requested=false 2024-11-18T18:49:59,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:49:59,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:49:59,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T18:49:59,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/8e90d530795643e09988ba9385d32bbb is 1080, key is row0104/info:/1731955799218/Put/seqid=0 2024-11-18T18:49:59,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741858_1034 (size=19000) 2024-11-18T18:49:59,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741858_1034 (size=19000) 2024-11-18T18:49:59,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/8e90d530795643e09988ba9385d32bbb 2024-11-18T18:49:59,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/8e90d530795643e09988ba9385d32bbb as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb 2024-11-18T18:49:59,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb, entries=13, sequenceid=156, filesize=18.6 K 2024-11-18T18:49:59,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for f10d5787b55aa38b294e59af360a1bb7 in 22ms, sequenceid=156, compaction requested=true 2024-11-18T18:49:59,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:49:59,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 
2024-11-18T18:49:59,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:59,267 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:49:59,268 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:49:59,268 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:49:59,268 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:49:59,268 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/9fe78915310641b996a974db9908909d, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=72.8 K 2024-11-18T18:49:59,268 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fe78915310641b996a974db9908909d, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731955785017 2024-11-18T18:49:59,268 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 691ed8925c1c439da270184396fddb0f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1731955799206 2024-11-18T18:49:59,269 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e90d530795643e09988ba9385d32bbb, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731955799218 2024-11-18T18:49:59,278 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#73 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:49:59,278 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/021a974e497b4451afed2ec385685bf4 is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:49:59,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741859_1035 (size=64811) 2024-11-18T18:49:59,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741859_1035 (size=64811) 2024-11-18T18:49:59,289 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/021a974e497b4451afed2ec385685bf4 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/021a974e497b4451afed2ec385685bf4 2024-11-18T18:49:59,294 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 021a974e497b4451afed2ec385685bf4(size=63.3 K), total size for store is 63.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:49:59,294 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:49:59,294 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955799267; duration=0sec 2024-11-18T18:49:59,294 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:49:59,294 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:49:59,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:49:59,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:00,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:00,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:01,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:01,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T18:50:01,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b0d7750c6cc04fbf8ca3eed0b656511c is 1080, key is row0117/info:/1731955799245/Put/seqid=0 2024-11-18T18:50:01,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741860_1036 (size=19000) 2024-11-18T18:50:01,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741860_1036 (size=19000) 2024-11-18T18:50:01,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b0d7750c6cc04fbf8ca3eed0b656511c 2024-11-18T18:50:01,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b0d7750c6cc04fbf8ca3eed0b656511c as 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c 2024-11-18T18:50:01,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c, entries=13, sequenceid=173, filesize=18.6 K 2024-11-18T18:50:01,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for f10d5787b55aa38b294e59af360a1bb7 in 27ms, sequenceid=173, compaction requested=false 2024-11-18T18:50:01,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:01,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T18:50:01,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/069a4c12af5349e3bb3956712d778875 is 1080, key is row0130/info:/1731955801271/Put/seqid=0 2024-11-18T18:50:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741861_1037 (size=17906) 2024-11-18T18:50:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741861_1037 (size=17906) 2024-11-18T18:50:01,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/069a4c12af5349e3bb3956712d778875 2024-11-18T18:50:01,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/069a4c12af5349e3bb3956712d778875 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875 2024-11-18T18:50:01,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875, entries=12, sequenceid=188, filesize=17.5 K 2024-11-18T18:50:01,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for f10d5787b55aa38b294e59af360a1bb7 in 22ms, sequenceid=188, compaction requested=true 2024-11-18T18:50:01,322 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:01,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:01,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:01,322 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:01,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T18:50:01,323 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101717 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:50:01,323 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:01,323 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:01,323 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/021a974e497b4451afed2ec385685bf4, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=99.3 K 2024-11-18T18:50:01,324 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 021a974e497b4451afed2ec385685bf4, keycount=55, bloomtype=ROW, size=63.3 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731955785017 2024-11-18T18:50:01,324 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0d7750c6cc04fbf8ca3eed0b656511c, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1731955799245 2024-11-18T18:50:01,324 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 069a4c12af5349e3bb3956712d778875, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1731955801271 2024-11-18T18:50:01,326 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/a44bc257d79e4adfaeff7a3a2923b6a8 is 1080, key is row0142/info:/1731955801300/Put/seqid=0 2024-11-18T18:50:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741862_1038 (size=16828) 2024-11-18T18:50:01,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741862_1038 (size=16828) 2024-11-18T18:50:01,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/a44bc257d79e4adfaeff7a3a2923b6a8 2024-11-18T18:50:01,337 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#77 average throughput is 41.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:01,338 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/57b6005275b24878bf63000f75d5fc0e is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:01,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/a44bc257d79e4adfaeff7a3a2923b6a8 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8 2024-11-18T18:50:01,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8, entries=11, sequenceid=202, filesize=16.4 K 2024-11-18T18:50:01,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741863_1039 (size=91940) 2024-11-18T18:50:01,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741863_1039 (size=91940) 2024-11-18T18:50:01,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for f10d5787b55aa38b294e59af360a1bb7 in 22ms, sequenceid=202, compaction requested=false 2024-11-18T18:50:01,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:01,349 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/57b6005275b24878bf63000f75d5fc0e as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/57b6005275b24878bf63000f75d5fc0e 2024-11-18T18:50:01,355 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 57b6005275b24878bf63000f75d5fc0e(size=89.8 K), total size for store is 106.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:01,355 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:01,355 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955801322; duration=0sec 2024-11-18T18:50:01,355 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:01,355 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:01,366 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T18:50:01,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:01,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:02,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:02,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:03,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:03,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T18:50:03,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b5d8332344c64d898b3df71b19deb38c is 1080, key is row0153/info:/1731955801323/Put/seqid=0 2024-11-18T18:50:03,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741864_1040 (size=14672) 2024-11-18T18:50:03,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741864_1040 (size=14672) 2024-11-18T18:50:03,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b5d8332344c64d898b3df71b19deb38c 2024-11-18T18:50:03,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/b5d8332344c64d898b3df71b19deb38c as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c 2024-11-18T18:50:03,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c, entries=9, sequenceid=215, filesize=14.3 K 2024-11-18T18:50:03,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for f10d5787b55aa38b294e59af360a1bb7 in 27ms, sequenceid=215, compaction requested=true 2024-11-18T18:50:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:03,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:03,373 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:03,373 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:03,374 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123440 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:50:03,374 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:03,374 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:03,374 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/57b6005275b24878bf63000f75d5fc0e, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=120.5 K 2024-11-18T18:50:03,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:03,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T18:50:03,375 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57b6005275b24878bf63000f75d5fc0e, keycount=80, bloomtype=ROW, size=89.8 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1731955785017 2024-11-18T18:50:03,375 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting a44bc257d79e4adfaeff7a3a2923b6a8, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731955801300 2024-11-18T18:50:03,375 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5d8332344c64d898b3df71b19deb38c, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731955801323 2024-11-18T18:50:03,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/3fc219259d7e4d1992d6d1beae58481b is 1080, key is row0162/info:/1731955803349/Put/seqid=0 2024-11-18T18:50:03,384 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741865_1041 (size=17906) 2024-11-18T18:50:03,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741865_1041 (size=17906) 2024-11-18T18:50:03,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/3fc219259d7e4d1992d6d1beae58481b 2024-11-18T18:50:03,388 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#80 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:03,389 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/82ff5cd18a834449a692b7c755a12b63 is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:03,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/3fc219259d7e4d1992d6d1beae58481b as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b 2024-11-18T18:50:03,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741866_1042 (size=113606) 2024-11-18T18:50:03,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741866_1042 (size=113606) 2024-11-18T18:50:03,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b, entries=12, sequenceid=230, filesize=17.5 K 2024-11-18T18:50:03,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for f10d5787b55aa38b294e59af360a1bb7 in 22ms, sequenceid=230, compaction requested=false 2024-11-18T18:50:03,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:03,398 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/82ff5cd18a834449a692b7c755a12b63 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/82ff5cd18a834449a692b7c755a12b63 
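Each time a flush leaves three eligible store files, the lines above show the compaction policy selecting all of them ("Exploring compaction algorithm has selected 3 files of size ... starting at candidate #0") and rewriting them into one larger file while PressureAwareThroughputController keeps the rewrite under the 50.00 MB/second limit. The toy selector below only loosely mirrors that selection step: it picks the cheapest contiguous run of at least minFiles files by total size. The real ExploringCompactionPolicy additionally explores permutations and applies size-ratio and blocking-file rules; the class and method names here are made up for illustration.

    // Toy sketch of "pick a cheap contiguous run of store files to compact"; not HBase code.
    import java.util.List;

    public class ToyCompactionSelector {
      static int[] select(List<Long> fileSizes, int minFiles) {
        int bestStart = -1;
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start + minFiles <= fileSizes.size(); start++) {
          long total = 0;
          for (int i = start; i < start + minFiles; i++) {
            total += fileSizes.get(i);
          }
          if (total < bestTotal) {   // prefer the cheapest run to keep write amplification low
            bestTotal = total;
            bestStart = start;
          }
        }
        return bestStart < 0 ? new int[0] : new int[] { bestStart, minFiles };
      }

      public static void main(String[] args) {
        // Sizes roughly matching one of the 3-file selections logged above (63.3 K + 18.6 K + 17.5 K).
        List<Long> sizes = List.of(64811L, 19000L, 17906L);
        int[] pick = select(sizes, 3);
        System.out.println("compact " + pick[1] + " files starting at candidate #" + pick[0]);
      }
    }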
2024-11-18T18:50:03,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:03,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T18:50:03,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/f4c0dcedb9b141a59c26730890b07a27 is 1080, key is row0174/info:/1731955803376/Put/seqid=0 2024-11-18T18:50:03,404 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 82ff5cd18a834449a692b7c755a12b63(size=110.9 K), total size for store is 128.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:03,404 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:03,404 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955803373; duration=0sec 2024-11-18T18:50:03,404 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:03,404 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:03,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741867_1043 (size=16828) 2024-11-18T18:50:03,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741867_1043 (size=16828) 2024-11-18T18:50:03,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:03,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:50:03,808 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/f4c0dcedb9b141a59c26730890b07a27 2024-11-18T18:50:03,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/f4c0dcedb9b141a59c26730890b07a27 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27 2024-11-18T18:50:03,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27, entries=11, sequenceid=244, filesize=16.4 K 2024-11-18T18:50:03,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for f10d5787b55aa38b294e59af360a1bb7 in 426ms, sequenceid=244, compaction requested=true 2024-11-18T18:50:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:03,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:03,825 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:03,826 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 148340 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:50:03,826 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:03,826 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 
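The recurring Close-WAL-Writer-0 warnings in this log come from RecoverLeaseFSUtils probing whether the old WAL file is already closed: per the stack traces, it invokes DistributedFileSystem.isFileClosed through reflection, and because the filesystem handle it uses has already been closed, every probe surfaces as an InvocationTargetException caused by "java.io.IOException: Filesystem closed" and is retried roughly once per second. The sketch below shows that reflective probe pattern; the class name and the fallback behavior are assumptions for illustration, not the actual HBase utility.

    // Sketch of a reflective isFileClosed probe like the one the WARN lines above report on.
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbe {
      // Returns TRUE/FALSE if the filesystem answered, or null if the probe could not be made.
      static Boolean probe(FileSystem fs, Path wal) {
        try {
          // isFileClosed(Path) exists on DistributedFileSystem but not on the generic FileSystem API,
          // hence the reflection.
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, wal);
        } catch (NoSuchMethodException e) {
          return null;  // filesystem does not expose isFileClosed; caller would fall back to lease-recovery retries
        } catch (IllegalAccessException | InvocationTargetException e) {
          // The "Failed invocation" WARN corresponds to this branch; for InvocationTargetException the
          // real cause (here, "Filesystem closed") is on e.getCause().
          return null;
        }
      }
    }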
2024-11-18T18:50:03,826 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/82ff5cd18a834449a692b7c755a12b63, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=144.9 K 2024-11-18T18:50:03,826 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82ff5cd18a834449a692b7c755a12b63, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731955785017 2024-11-18T18:50:03,827 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3fc219259d7e4d1992d6d1beae58481b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1731955803349 2024-11-18T18:50:03,827 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4c0dcedb9b141a59c26730890b07a27, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731955803376 2024-11-18T18:50:03,837 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#82 average throughput is 63.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:03,837 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/190f12c68c8f4cdaaea5c780d64b6b43 is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741868_1044 (size=138707) 2024-11-18T18:50:03,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741868_1044 (size=138707) 2024-11-18T18:50:03,847 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/190f12c68c8f4cdaaea5c780d64b6b43 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/190f12c68c8f4cdaaea5c780d64b6b43 2024-11-18T18:50:03,852 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 190f12c68c8f4cdaaea5c780d64b6b43(size=135.5 K), total size for store is 135.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:03,852 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:03,852 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955803825; duration=0sec 2024-11-18T18:50:03,852 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:03,852 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:04,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:04,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:05,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:05,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T18:50:05,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/58e7b36c2acd4419a32abab1db1d6397 is 1080, key is row0185/info:/1731955803399/Put/seqid=0 2024-11-18T18:50:05,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741869_1045 (size=14673) 2024-11-18T18:50:05,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741869_1045 (size=14673) 2024-11-18T18:50:05,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/58e7b36c2acd4419a32abab1db1d6397 2024-11-18T18:50:05,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/58e7b36c2acd4419a32abab1db1d6397 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397 2024-11-18T18:50:05,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397, entries=9, sequenceid=258, filesize=14.3 K 2024-11-18T18:50:05,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=12.61 KB/12912 for f10d5787b55aa38b294e59af360a1bb7 in 28ms, sequenceid=258, compaction requested=false 2024-11-18T18:50:05,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:05,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:05,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 
2024-11-18T18:50:05,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/bdc25e6988ba43afa4058419b3e69cda is 1080, key is row0194/info:/1731955805418/Put/seqid=0 2024-11-18T18:50:05,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741870_1046 (size=19013) 2024-11-18T18:50:05,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741870_1046 (size=19013) 2024-11-18T18:50:05,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/bdc25e6988ba43afa4058419b3e69cda 2024-11-18T18:50:05,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/bdc25e6988ba43afa4058419b3e69cda as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda 2024-11-18T18:50:05,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda, entries=13, sequenceid=274, filesize=18.6 K 2024-11-18T18:50:05,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for f10d5787b55aa38b294e59af360a1bb7 in 20ms, sequenceid=274, compaction requested=true 2024-11-18T18:50:05,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:05,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:05,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:05,465 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:05,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:05,466 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T18:50:05,466 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 172393 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-18T18:50:05,466 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:05,466 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:05,467 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/190f12c68c8f4cdaaea5c780d64b6b43, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=168.4 K 2024-11-18T18:50:05,467 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 190f12c68c8f4cdaaea5c780d64b6b43, keycount=123, bloomtype=ROW, size=135.5 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731955785017 2024-11-18T18:50:05,467 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 58e7b36c2acd4419a32abab1db1d6397, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731955803399 2024-11-18T18:50:05,468 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting bdc25e6988ba43afa4058419b3e69cda, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731955805418 2024-11-18T18:50:05,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/2f78b2c62c604480884f967c0fcb3e7c is 1080, key is row0207/info:/1731955805446/Put/seqid=0 2024-11-18T18:50:05,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741871_1047 (size=16839) 2024-11-18T18:50:05,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741871_1047 (size=16839) 2024-11-18T18:50:05,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/2f78b2c62c604480884f967c0fcb3e7c 2024-11-18T18:50:05,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/2f78b2c62c604480884f967c0fcb3e7c as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c 2024-11-18T18:50:05,482 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#86 average throughput is 37.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:05,483 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/627385d7504342599f7b4932fa370ca2 is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:05,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c, entries=11, sequenceid=288, filesize=16.4 K 2024-11-18T18:50:05,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=7.36 KB/7532 for f10d5787b55aa38b294e59af360a1bb7 in 21ms, sequenceid=288, compaction requested=false 2024-11-18T18:50:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:05,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741872_1048 (size=162543) 2024-11-18T18:50:05,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741872_1048 (size=162543) 2024-11-18T18:50:05,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:05,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:50:05,493 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/627385d7504342599f7b4932fa370ca2 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/627385d7504342599f7b4932fa370ca2 2024-11-18T18:50:05,498 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 627385d7504342599f7b4932fa370ca2(size=158.7 K), total size for store is 175.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:05,498 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:05,498 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955805465; duration=0sec 2024-11-18T18:50:05,498 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:05,498 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:06,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:06,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:50:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:07,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T18:50:07,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:07,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:07,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/97e69efdab394beb9a45a72adabad40e is 1080, key is row0218/info:/1731955805467/Put/seqid=0 2024-11-18T18:50:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741873_1049 (size=13602) 2024-11-18T18:50:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741873_1049 (size=13602) 2024-11-18T18:50:07,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/97e69efdab394beb9a45a72adabad40e 2024-11-18T18:50:07,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/97e69efdab394beb9a45a72adabad40e as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e 2024-11-18T18:50:07,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e, entries=8, sequenceid=300, filesize=13.3 K 2024-11-18T18:50:07,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for f10d5787b55aa38b294e59af360a1bb7 in 27ms, sequenceid=300, compaction requested=true 2024-11-18T18:50:07,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:07,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:07,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:07,514 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:07,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:07,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T18:50:07,515 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192984 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:50:07,515 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:07,515 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 
2024-11-18T18:50:07,515 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/627385d7504342599f7b4932fa370ca2, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=188.5 K 2024-11-18T18:50:07,516 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 627385d7504342599f7b4932fa370ca2, keycount=145, bloomtype=ROW, size=158.7 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731955785017 2024-11-18T18:50:07,516 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f78b2c62c604480884f967c0fcb3e7c, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731955805446 2024-11-18T18:50:07,516 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97e69efdab394beb9a45a72adabad40e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731955805467 2024-11-18T18:50:07,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/d244cc8b0ed14d319b30c69102a20fdd is 1080, key is row0226/info:/1731955807489/Put/seqid=0 2024-11-18T18:50:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741874_1050 (size=17918) 2024-11-18T18:50:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741874_1050 (size=17918) 2024-11-18T18:50:07,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/d244cc8b0ed14d319b30c69102a20fdd 2024-11-18T18:50:07,530 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#89 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:07,530 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/01bf0e1d0fa14d74959902657af66bf2 is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:07,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/d244cc8b0ed14d319b30c69102a20fdd as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd 2024-11-18T18:50:07,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd, entries=12, sequenceid=315, filesize=17.5 K 2024-11-18T18:50:07,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for f10d5787b55aa38b294e59af360a1bb7 in 24ms, sequenceid=315, compaction requested=false 2024-11-18T18:50:07,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:07,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38211 {}] regionserver.HRegion(8855): Flush requested on f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:07,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T18:50:07,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741875_1051 (size=183150) 2024-11-18T18:50:07,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741875_1051 (size=183150) 2024-11-18T18:50:07,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/0816861a648d4d73a32041a15883791b is 1080, key is row0238/info:/1731955807515/Put/seqid=0 2024-11-18T18:50:07,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741876_1052 (size=17918) 2024-11-18T18:50:07,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741876_1052 (size=17918) 2024-11-18T18:50:07,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/0816861a648d4d73a32041a15883791b 2024-11-18T18:50:07,552 DEBUG 
[RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/01bf0e1d0fa14d74959902657af66bf2 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/01bf0e1d0fa14d74959902657af66bf2 2024-11-18T18:50:07,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/0816861a648d4d73a32041a15883791b as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b 2024-11-18T18:50:07,557 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 01bf0e1d0fa14d74959902657af66bf2(size=178.9 K), total size for store is 196.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:07,557 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:07,557 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955807514; duration=0sec 2024-11-18T18:50:07,557 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:07,557 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:07,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b, entries=12, sequenceid=330, filesize=17.5 K 2024-11-18T18:50:07,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for f10d5787b55aa38b294e59af360a1bb7 in 22ms, sequenceid=330, compaction requested=true 2024-11-18T18:50:07,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:07,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f10d5787b55aa38b294e59af360a1bb7:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T18:50:07,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:07,562 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T18:50:07,563 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 218986 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T18:50:07,563 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1541): f10d5787b55aa38b294e59af360a1bb7/info is initiating minor compaction (all files) 2024-11-18T18:50:07,563 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f10d5787b55aa38b294e59af360a1bb7/info in TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:07,563 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/01bf0e1d0fa14d74959902657af66bf2, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b] into tmpdir=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp, totalSize=213.9 K 2024-11-18T18:50:07,564 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01bf0e1d0fa14d74959902657af66bf2, keycount=164, bloomtype=ROW, size=178.9 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731955785017 2024-11-18T18:50:07,564 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting d244cc8b0ed14d319b30c69102a20fdd, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731955807489 2024-11-18T18:50:07,564 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0816861a648d4d73a32041a15883791b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1731955807515 2024-11-18T18:50:07,575 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f10d5787b55aa38b294e59af360a1bb7#info#compaction#91 average throughput is 64.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T18:50:07,576 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/6ea0df61992b42debc6f7b00ccfa806a is 1080, key is row0062/info:/1731955785017/Put/seqid=0 2024-11-18T18:50:07,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741877_1053 (size=209225) 2024-11-18T18:50:07,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741877_1053 (size=209225) 2024-11-18T18:50:07,584 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/6ea0df61992b42debc6f7b00ccfa806a as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6ea0df61992b42debc6f7b00ccfa806a 2024-11-18T18:50:07,590 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f10d5787b55aa38b294e59af360a1bb7/info of f10d5787b55aa38b294e59af360a1bb7 into 6ea0df61992b42debc6f7b00ccfa806a(size=204.3 K), total size for store is 204.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T18:50:07,590 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:07,590 INFO [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., storeName=f10d5787b55aa38b294e59af360a1bb7/info, priority=13, startTime=1731955807562; duration=0sec 2024-11-18T18:50:07,590 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T18:50:07,590 DEBUG [RS:0;39fff3b0f89c:38211-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f10d5787b55aa38b294e59af360a1bb7:info 2024-11-18T18:50:08,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:08,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:09,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:09,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:09,555 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-18T18:50:09,556 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38211%2C1731955771609.1731955809556 2024-11-18T18:50:09,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,568 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,568 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,569 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955772126 with entries=317, filesize=310.19 KB; new WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955809556 2024-11-18T18:50:09,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45575:45575),(127.0.0.1/127.0.0.1:45361:45361)] 2024-11-18T18:50:09,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955772126 is not closed yet, will try archiving it next time 2024-11-18T18:50:09,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741833_1009 (size=317642) 2024-11-18T18:50:09,571 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741833_1009 (size=317642) 2024-11-18T18:50:09,574 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3f30926e86a799df41ae2228b3086b48: 2024-11-18T18:50:09,574 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing f10d5787b55aa38b294e59af360a1bb7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T18:50:09,578 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/4c1d152cdf694f10be2e0a44c6b6abdc is 1080, key is row0250/info:/1731955807541/Put/seqid=0 2024-11-18T18:50:09,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741879_1055 (size=12523) 2024-11-18T18:50:09,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741879_1055 (size=12523) 2024-11-18T18:50:09,583 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/4c1d152cdf694f10be2e0a44c6b6abdc 2024-11-18T18:50:09,587 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/.tmp/info/4c1d152cdf694f10be2e0a44c6b6abdc as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/4c1d152cdf694f10be2e0a44c6b6abdc 2024-11-18T18:50:09,592 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/4c1d152cdf694f10be2e0a44c6b6abdc, entries=7, sequenceid=342, filesize=12.2 K 2024-11-18T18:50:09,593 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f10d5787b55aa38b294e59af360a1bb7 in 19ms, sequenceid=342, compaction requested=false 2024-11-18T18:50:09,593 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f10d5787b55aa38b294e59af360a1bb7: 2024-11-18T18:50:09,593 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-18T18:50:09,598 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/9818ab958b954231ade3f396a7e384c1 is 193, key is TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7./info:regioninfo/1731955787825/Put/seqid=0 2024-11-18T18:50:09,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741880_1056 (size=6223) 2024-11-18T18:50:09,604 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741880_1056 (size=6223) 2024-11-18T18:50:09,605 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/9818ab958b954231ade3f396a7e384c1 2024-11-18T18:50:09,610 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/.tmp/info/9818ab958b954231ade3f396a7e384c1 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/info/9818ab958b954231ade3f396a7e384c1 2024-11-18T18:50:09,615 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/info/9818ab958b954231ade3f396a7e384c1, entries=5, sequenceid=21, filesize=6.1 K 2024-11-18T18:50:09,616 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 23ms, sequenceid=21, compaction requested=false 2024-11-18T18:50:09,616 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T18:50:09,616 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C38211%2C1731955771609.1731955809616 2024-11-18T18:50:09,625 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,625 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,625 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955809556 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955809616 2024-11-18T18:50:09,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741878_1054 (size=731) 2024-11-18T18:50:09,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741878_1054 (size=731) 2024-11-18T18:50:09,631 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45575:45575),(127.0.0.1/127.0.0.1:45361:45361)] 2024-11-18T18:50:09,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955772126 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs/39fff3b0f89c%2C38211%2C1731955771609.1731955772126 2024-11-18T18:50:09,632 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T18:50:09,632 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:50:09,632 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:50:09,632 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:09,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:09,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:09,633 INFO [WAL-Archive-0 
{}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/WALs/39fff3b0f89c,38211,1731955771609/39fff3b0f89c%2C38211%2C1731955771609.1731955809556 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs/39fff3b0f89c%2C38211%2C1731955771609.1731955809556 2024-11-18T18:50:09,633 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:50:09,633 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:50:09,633 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1776888618, stopped=false 2024-11-18T18:50:09,633 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,38439,1731955771403 2024-11-18T18:50:09,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:09,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:09,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:09,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:09,685 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:50:09,685 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:50:09,685 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:09,686 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:09,686 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,38211,1731955771609' ***** 2024-11-18T18:50:09,686 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:50:09,686 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:09,686 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(3091): Received CLOSE for 3f30926e86a799df41ae2228b3086b48 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(3091): Received CLOSE for f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,38211,1731955771609 2024-11-18T18:50:09,686 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:50:09,686 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3f30926e86a799df41ae2228b3086b48, disabling compactions & flushes 2024-11-18T18:50:09,687 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:38211. 2024-11-18T18:50:09,687 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 
after waiting 0 ms 2024-11-18T18:50:09,687 DEBUG [RS:0;39fff3b0f89c:38211 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:50:09,687 DEBUG [RS:0;39fff3b0f89c:38211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:50:09,687 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-18T18:50:09,687 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1325): Online Regions={3f30926e86a799df41ae2228b3086b48=TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48., f10d5787b55aa38b294e59af360a1bb7=TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T18:50:09,687 DEBUG [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3f30926e86a799df41ae2228b3086b48, f10d5787b55aa38b294e59af360a1bb7 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:50:09,687 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:50:09,687 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:50:09,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-bottom] to archive 2024-11-18T18:50:09,688 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T18:50:09,690 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:50:09,691 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39fff3b0f89c:38439 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-18T18:50:09,691 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-18T18:50:09,692 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-18T18:50:09,692 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:50:09,693 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:50:09,693 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955809687Running coprocessor pre-close hooks at 1731955809687Disabling compacts and flushes for region at 1731955809687Disabling writes for close at 1731955809687Writing region close event to WAL at 1731955809688 (+1 ms)Running coprocessor post-close hooks at 1731955809692 (+4 ms)Closed at 1731955809693 (+1 ms) 2024-11-18T18:50:09,693 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:50:09,694 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/3f30926e86a799df41ae2228b3086b48/recovered.edits/134.seqid, newMaxSeqId=134, maxSeqId=129 2024-11-18T18:50:09,695 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3f30926e86a799df41ae2228b3086b48: Waiting for close lock at 1731955809686Running coprocessor pre-close hooks at 1731955809686Disabling compacts and flushes for region at 1731955809686Disabling writes for close at 1731955809687 (+1 ms)Writing region close event to WAL at 1731955809691 (+4 ms)Running coprocessor post-close hooks at 1731955809695 (+4 ms)Closed at 1731955809695 2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731955787105.3f30926e86a799df41ae2228b3086b48. 
2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f10d5787b55aa38b294e59af360a1bb7, disabling compactions & flushes 2024-11-18T18:50:09,695 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. after waiting 0 ms 2024-11-18T18:50:09,695 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:09,696 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8->hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/59f804024a95bdb9d0c6e5330de69db8/info/6b80dc9f67fd4d0ebb663605cd32e893-top, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/9fe78915310641b996a974db9908909d, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/021a974e497b4451afed2ec385685bf4, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb, 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/57b6005275b24878bf63000f75d5fc0e, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/82ff5cd18a834449a692b7c755a12b63, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/190f12c68c8f4cdaaea5c780d64b6b43, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/627385d7504342599f7b4932fa370ca2, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/01bf0e1d0fa14d74959902657af66bf2, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b] to archive 2024-11-18T18:50:09,697 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-18T18:50:09,699 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/6b80dc9f67fd4d0ebb663605cd32e893.59f804024a95bdb9d0c6e5330de69db8 2024-11-18T18:50:09,700 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-00baa93c143c418c86732d69d2924f83 2024-11-18T18:50:09,701 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-6fc9006b9fd84bab9703c89072eae3a6 2024-11-18T18:50:09,703 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/9fe78915310641b996a974db9908909d to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/9fe78915310641b996a974db9908909d 2024-11-18T18:50:09,704 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/TestLogRolling-testLogRolling=59f804024a95bdb9d0c6e5330de69db8-a088334c04fd4b06a1f4fcaa16f9005e 2024-11-18T18:50:09,705 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/691ed8925c1c439da270184396fddb0f 2024-11-18T18:50:09,706 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/021a974e497b4451afed2ec385685bf4 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/021a974e497b4451afed2ec385685bf4 2024-11-18T18:50:09,708 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/8e90d530795643e09988ba9385d32bbb 2024-11-18T18:50:09,709 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b0d7750c6cc04fbf8ca3eed0b656511c 2024-11-18T18:50:09,710 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/57b6005275b24878bf63000f75d5fc0e to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/57b6005275b24878bf63000f75d5fc0e 2024-11-18T18:50:09,711 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875 to 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/069a4c12af5349e3bb3956712d778875 2024-11-18T18:50:09,712 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/a44bc257d79e4adfaeff7a3a2923b6a8 2024-11-18T18:50:09,713 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/82ff5cd18a834449a692b7c755a12b63 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/82ff5cd18a834449a692b7c755a12b63 2024-11-18T18:50:09,715 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/b5d8332344c64d898b3df71b19deb38c 2024-11-18T18:50:09,716 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/3fc219259d7e4d1992d6d1beae58481b 2024-11-18T18:50:09,717 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/190f12c68c8f4cdaaea5c780d64b6b43 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/190f12c68c8f4cdaaea5c780d64b6b43 2024-11-18T18:50:09,718 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/f4c0dcedb9b141a59c26730890b07a27 2024-11-18T18:50:09,719 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/58e7b36c2acd4419a32abab1db1d6397 2024-11-18T18:50:09,720 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/627385d7504342599f7b4932fa370ca2 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/627385d7504342599f7b4932fa370ca2 2024-11-18T18:50:09,721 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/bdc25e6988ba43afa4058419b3e69cda 2024-11-18T18:50:09,723 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/2f78b2c62c604480884f967c0fcb3e7c 2024-11-18T18:50:09,724 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/01bf0e1d0fa14d74959902657af66bf2 to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/01bf0e1d0fa14d74959902657af66bf2 2024-11-18T18:50:09,725 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/97e69efdab394beb9a45a72adabad40e 2024-11-18T18:50:09,726 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/d244cc8b0ed14d319b30c69102a20fdd 2024-11-18T18:50:09,727 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b to hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/archive/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/info/0816861a648d4d73a32041a15883791b 2024-11-18T18:50:09,727 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [9fe78915310641b996a974db9908909d=43081, 691ed8925c1c439da270184396fddb0f=12516, 021a974e497b4451afed2ec385685bf4=64811, 8e90d530795643e09988ba9385d32bbb=19000, b0d7750c6cc04fbf8ca3eed0b656511c=19000, 57b6005275b24878bf63000f75d5fc0e=91940, 069a4c12af5349e3bb3956712d778875=17906, a44bc257d79e4adfaeff7a3a2923b6a8=16828, 82ff5cd18a834449a692b7c755a12b63=113606, b5d8332344c64d898b3df71b19deb38c=14672, 3fc219259d7e4d1992d6d1beae58481b=17906, 190f12c68c8f4cdaaea5c780d64b6b43=138707, f4c0dcedb9b141a59c26730890b07a27=16828, 58e7b36c2acd4419a32abab1db1d6397=14673, 627385d7504342599f7b4932fa370ca2=162543, bdc25e6988ba43afa4058419b3e69cda=19013, 2f78b2c62c604480884f967c0fcb3e7c=16839, 01bf0e1d0fa14d74959902657af66bf2=183150, 97e69efdab394beb9a45a72adabad40e=13602, d244cc8b0ed14d319b30c69102a20fdd=17918, 0816861a648d4d73a32041a15883791b=17918] 2024-11-18T18:50:09,730 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/data/default/TestLogRolling-testLogRolling/f10d5787b55aa38b294e59af360a1bb7/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=129 2024-11-18T18:50:09,731 INFO [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 
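The archival entries above show the store closer moving each compacted-away HFile from the region's data directory into the matching path under archive/, followed by a WARN when the report of those archived files back to the master fails. Below is a minimal sketch, assuming a reachable namenode and an archive layout like the one in these paths, of listing what ended up in an archived column-family directory with the stock Hadoop FileSystem API; the URI and directory are passed as arguments rather than hard-coded, since the concrete test paths above are run-specific.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Lists the HFiles sitting under an archived column-family directory, e.g.
// <rootdir>/archive/data/default/TestLogRolling-testLogRolling/<region>/info.
public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // args[0]: filesystem URI such as hdfs://localhost:38889
    // args[1]: archived family directory (run-specific, so not hard-coded here)
    FileSystem fs = FileSystem.get(URI.create(args[0]), new Configuration());
    for (FileStatus status : fs.listStatus(new Path(args[1]))) {
      // One entry per "Archived from FileableStoreFile" line in the log above.
      System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
    }
    fs.close();
  }
}
```

Run against the archive path from a failed run to confirm that the store files named in the "Failed to report archival of files" WARN did in fact reach the archive directory.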
2024-11-18T18:50:09,731 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f10d5787b55aa38b294e59af360a1bb7: Waiting for close lock at 1731955809695Running coprocessor pre-close hooks at 1731955809695Disabling compacts and flushes for region at 1731955809695Disabling writes for close at 1731955809695Writing region close event to WAL at 1731955809728 (+33 ms)Running coprocessor post-close hooks at 1731955809731 (+3 ms)Closed at 1731955809731 2024-11-18T18:50:09,731 DEBUG [RS_CLOSE_REGION-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731955787105.f10d5787b55aa38b294e59af360a1bb7. 2024-11-18T18:50:09,887 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,38211,1731955771609; all regions closed. 2024-11-18T18:50:09,888 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,888 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,889 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,889 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,889 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741834_1010 (size=8107) 2024-11-18T18:50:09,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741834_1010 (size=8107) 2024-11-18T18:50:09,899 DEBUG [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs 2024-11-18T18:50:09,899 INFO [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C38211%2C1731955771609.meta:.meta(num 1731955772558) 2024-11-18T18:50:09,899 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,900 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,900 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,900 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,900 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:09,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741881_1057 (size=780) 2024-11-18T18:50:09,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741881_1057 (size=780) 2024-11-18T18:50:09,904 DEBUG [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/oldWALs 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C38211%2C1731955771609:(num 1731955809616) 2024-11-18T18:50:09,904 DEBUG [RS:0;39fff3b0f89c:38211 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.ChoreService(370): Chore service for: 
regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:50:09,904 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:50:09,904 INFO [RS:0;39fff3b0f89c:38211 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38211 2024-11-18T18:50:09,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,38211,1731955771609 2024-11-18T18:50:09,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:50:09,913 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:50:09,921 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,38211,1731955771609] 2024-11-18T18:50:09,930 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,38211,1731955771609 already deleted, retry=false 2024-11-18T18:50:09,930 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,38211,1731955771609 expired; onlineServers=0 2024-11-18T18:50:09,930 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,38439,1731955771403' ***** 2024-11-18T18:50:09,930 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:50:09,930 DEBUG [M:0;39fff3b0f89c:38439 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:50:09,930 DEBUG [M:0;39fff3b0f89c:38439 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:50:09,930 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
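The ZooKeeper events above are how the master notices the region server going away: the server's ephemeral znode under /hbase/rs disappears, a NodeDeleted watch fires, and RegionServerTracker processes the expiration. Below is a minimal sketch, using the plain Apache ZooKeeper client rather than HBase's own ZKWatcher, of watching one such znode; the quorum address is taken from the log, while the znode path and session timeout are illustrative.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchRegionServerZnode {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // Quorum address as reported in the log; the default watcher passed here
    // receives events for watches registered with the boolean "true" flag below.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50680", 30_000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("Ephemeral znode deleted: " + event.getPath());
        deleted.countDown();
      }
    });
    // Illustrative path; a live cluster has one such znode per region server.
    zk.exists("/hbase/rs/39fff3b0f89c,38211,1731955771609", true);
    deleted.await();
    zk.close();
  }
}
```

Because the znode is ephemeral, the same NodeDeleted event is produced whether the server shuts down cleanly (as here) or its session simply expires.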
2024-11-18T18:50:09,930 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955771917 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955771917,5,FailOnTimeoutGroup] 2024-11-18T18:50:09,930 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955771917 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955771917,5,FailOnTimeoutGroup] 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:50:09,930 DEBUG [M:0;39fff3b0f89c:38439 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:50:09,930 INFO [M:0;39fff3b0f89c:38439 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:50:09,931 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:50:09,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:50:09,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:09,938 DEBUG [M:0;39fff3b0f89c:38439 {}] zookeeper.ZKUtil(347): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:50:09,938 WARN [M:0;39fff3b0f89c:38439 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:50:09,938 INFO [M:0;39fff3b0f89c:38439 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/.lastflushedseqids 2024-11-18T18:50:09,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741882_1058 (size=228) 2024-11-18T18:50:09,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741882_1058 (size=228) 2024-11-18T18:50:09,946 INFO [M:0;39fff3b0f89c:38439 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:50:09,946 INFO [M:0;39fff3b0f89c:38439 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:50:09,946 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:50:09,946 INFO [M:0;39fff3b0f89c:38439 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:09,946 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:09,946 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:50:09,946 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:09,946 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-11-18T18:50:09,960 DEBUG [M:0;39fff3b0f89c:38439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e007a9c9b5ac4dd5851be1467f14f4a5 is 82, key is hbase:meta,,1/info:regioninfo/1731955772599/Put/seqid=0 2024-11-18T18:50:09,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741883_1059 (size=5672) 2024-11-18T18:50:09,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741883_1059 (size=5672) 2024-11-18T18:50:09,964 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e007a9c9b5ac4dd5851be1467f14f4a5 2024-11-18T18:50:09,981 DEBUG [M:0;39fff3b0f89c:38439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48791e2c6fc94edf8026682768e768f8 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731955773178/Put/seqid=0 2024-11-18T18:50:09,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741884_1060 (size=7091) 2024-11-18T18:50:09,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741884_1060 (size=7091) 2024-11-18T18:50:09,986 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48791e2c6fc94edf8026682768e768f8 2024-11-18T18:50:09,990 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 48791e2c6fc94edf8026682768e768f8 2024-11-18T18:50:10,000 INFO [regionserver/39fff3b0f89c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:50:10,003 DEBUG [M:0;39fff3b0f89c:38439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59f1cd1762574850a0b9efe697a522e2 is 69, key is 39fff3b0f89c,38211,1731955771609/rs:state/1731955771982/Put/seqid=0 2024-11-18T18:50:10,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741885_1061 (size=5156) 2024-11-18T18:50:10,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741885_1061 (size=5156) 2024-11-18T18:50:10,008 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59f1cd1762574850a0b9efe697a522e2 2024-11-18T18:50:10,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:10,022 INFO [RS:0;39fff3b0f89c:38211 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:50:10,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38211-0x101509179df0001, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:10,022 INFO [RS:0;39fff3b0f89c:38211 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,38211,1731955771609; zookeeper connection closed. 2024-11-18T18:50:10,022 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@42ef4b52 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@42ef4b52 2024-11-18T18:50:10,022 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:50:10,026 DEBUG [M:0;39fff3b0f89c:38439 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3e9bb64e848f4c448997e19780257b9e is 52, key is load_balancer_on/state:d/1731955772766/Put/seqid=0 2024-11-18T18:50:10,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741886_1062 (size=5056) 2024-11-18T18:50:10,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741886_1062 (size=5056) 2024-11-18T18:50:10,030 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3e9bb64e848f4c448997e19780257b9e 2024-11-18T18:50:10,035 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e007a9c9b5ac4dd5851be1467f14f4a5 as 
hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e007a9c9b5ac4dd5851be1467f14f4a5 2024-11-18T18:50:10,039 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e007a9c9b5ac4dd5851be1467f14f4a5, entries=8, sequenceid=125, filesize=5.5 K 2024-11-18T18:50:10,040 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/48791e2c6fc94edf8026682768e768f8 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/48791e2c6fc94edf8026682768e768f8 2024-11-18T18:50:10,045 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 48791e2c6fc94edf8026682768e768f8 2024-11-18T18:50:10,045 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/48791e2c6fc94edf8026682768e768f8, entries=13, sequenceid=125, filesize=6.9 K 2024-11-18T18:50:10,046 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/59f1cd1762574850a0b9efe697a522e2 as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59f1cd1762574850a0b9efe697a522e2 2024-11-18T18:50:10,050 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/59f1cd1762574850a0b9efe697a522e2, entries=1, sequenceid=125, filesize=5.0 K 2024-11-18T18:50:10,051 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3e9bb64e848f4c448997e19780257b9e as hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3e9bb64e848f4c448997e19780257b9e 2024-11-18T18:50:10,055 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38889/user/jenkins/test-data/4d7e292b-2eb6-482d-261d-7bc750d28864/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3e9bb64e848f4c448997e19780257b9e, entries=1, sequenceid=125, filesize=4.9 K 2024-11-18T18:50:10,056 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=125, compaction requested=false 2024-11-18T18:50:10,057 INFO [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
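The flush above writes each of the master store's four column families to a file under .tmp and then commits it into the family directory, which the log lines describe as "Committing ... as ...". Below is a minimal sketch of that commit step as a plain HDFS rename, under the assumption that a bare rename is enough for illustration; the real code path in HRegionFileSystem adds validation and error handling that are omitted here.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFile {
  // Moves a flushed .tmp file into its column-family directory, mirroring the
  // "Committing .../.tmp/info/<file> as .../info/<file>" lines above.
  static void commit(FileSystem fs, Path tmpFile, Path familyDir) throws Exception {
    Path target = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, target)) {
      throw new IllegalStateException("rename failed: " + tmpFile + " -> " + target);
    }
  }

  public static void main(String[] args) throws Exception {
    // args[0]: filesystem URI, args[1]: flushed .tmp file, args[2]: family directory.
    FileSystem fs = FileSystem.get(URI.create(args[0]), new Configuration());
    commit(fs, new Path(args[1]), new Path(args[2]));
    fs.close();
  }
}
```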
2024-11-18T18:50:10,057 DEBUG [M:0;39fff3b0f89c:38439 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955809946Disabling compacts and flushes for region at 1731955809946Disabling writes for close at 1731955809946Obtaining lock to block concurrent updates at 1731955809946Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955809946Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1731955809947 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731955809947Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955809947Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955809960 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955809960Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955809968 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955809981 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955809981Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955809990 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955810003 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955810003Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955810012 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955810025 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955810025Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dd30df: reopening flushed file at 1731955810034 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35789d4c: reopening flushed file at 1731955810040 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e928e2d: reopening flushed file at 1731955810045 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45c262b5: reopening flushed file at 1731955810050 (+5 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=125, compaction requested=false at 1731955810056 (+6 ms)Writing region close event to WAL at 1731955810057 (+1 ms)Closed at 1731955810057 2024-11-18T18:50:10,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:10,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:10,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:10,058 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:10,058 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39443 is added to blk_1073741830_1006 (size=61344) 2024-11-18T18:50:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36343 is added to blk_1073741830_1006 (size=61344) 2024-11-18T18:50:10,060 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
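The "Region close journal" entry above packs every step of the close into one string, each step stamped with an absolute time and, where relevant, a "(+N ms)" delta from the previous step. Below is a small, self-contained sketch of pulling those deltas back out of such a journal string, handy for a quick look at where a slow close spent its time; the sample string is a shortened copy of the format shown above.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalDeltas {
  public static void main(String[] args) {
    // A shortened journal string in the concatenated format seen in the log above.
    String journal = "Waiting for close lock at 1731955809946"
        + "Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955809960 (+13 ms)"
        + "Writing region close event to WAL at 1731955810057 (+1 ms)"
        + "Closed at 1731955810057";
    // Each step reads "<description> at <epoch-millis>", optionally followed by "(+<delta> ms)".
    Pattern step = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");
    Matcher m = step.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-60s +%s ms%n", m.group(1).trim(), delta);
    }
  }
}
```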
2024-11-18T18:50:10,060 INFO [M:0;39fff3b0f89c:38439 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T18:50:10,060 INFO [M:0;39fff3b0f89c:38439 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38439 2024-11-18T18:50:10,060 INFO [M:0;39fff3b0f89c:38439 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:50:10,168 INFO [M:0;39fff3b0f89c:38439 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:50:10,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:10,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38439-0x101509179df0000, quorum=127.0.0.1:50680, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:10,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cfb8c65{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:10,171 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33cca936{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:10,171 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:10,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51d47a1d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:10,171 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e0ccf7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:10,173 WARN [BP-695063187-172.17.0.2-1731955769491 heartbeating to localhost/127.0.0.1:38889 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:50:10,173 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:50:10,173 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:50:10,173 WARN [BP-695063187-172.17.0.2-1731955769491 heartbeating to localhost/127.0.0.1:38889 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-695063187-172.17.0.2-1731955769491 (Datanode Uuid 0a40822d-7b5f-4544-beb6-7b538ec2ee0a) service to localhost/127.0.0.1:38889 2024-11-18T18:50:10,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data3/current/BP-695063187-172.17.0.2-1731955769491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:10,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data4/current/BP-695063187-172.17.0.2-1731955769491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:10,174 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:50:10,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@389941f3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:10,176 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1370e23f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:10,176 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:10,176 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ef16bcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:10,177 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@638450ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:10,178 WARN [BP-695063187-172.17.0.2-1731955769491 heartbeating to localhost/127.0.0.1:38889 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:50:10,178 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:50:10,178 WARN [BP-695063187-172.17.0.2-1731955769491 heartbeating to localhost/127.0.0.1:38889 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-695063187-172.17.0.2-1731955769491 (Datanode Uuid eef88227-c2e2-46b2-80ca-e86d1a417b16) service to localhost/127.0.0.1:38889 2024-11-18T18:50:10,178 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:50:10,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data1/current/BP-695063187-172.17.0.2-1731955769491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:10,179 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/cluster_cf12c98d-8134-5c6f-adb6-89fad0dd01b2/data/data2/current/BP-695063187-172.17.0.2-1731955769491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:10,179 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:50:10,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3225d099{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:50:10,184 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c2acbdd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:10,184 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:10,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6150e164{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:10,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c05323f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:10,193 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:50:10,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:50:10,234 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38889 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38889 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:38889 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38889 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38889 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 485) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=131 (was 210), ProcessCount=11 (was 11), AvailableMemoryMB=4667 (was 4366) - AvailableMemoryMB LEAK? 
- 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=131, ProcessCount=11, AvailableMemoryMB=4667 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.log.dir so I do NOT create it in target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/7d8789f5-3564-7f4b-5a7b-46ec84b6e9c6/hadoop.tmp.dir so I do NOT create it in target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984, deleteOnExit=true 2024-11-18T18:50:10,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/test.cache.data in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T18:50:10,244 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:50:10,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/nfs.dump.dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/java.io.tmpdir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T18:50:10,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T18:50:10,257 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:50:10,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:10,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:10,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:50:10,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:50:10,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:50:10,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:50:10,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:50:10,565 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:50:10,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c18422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:50:10,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b907417{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:50:10,657 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b05d1eb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/java.io.tmpdir/jetty-localhost-42783-hadoop-hdfs-3_4_1-tests_jar-_-any-7378563163374233288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:50:10,657 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a377f89{HTTP/1.1, (http/1.1)}{localhost:42783} 2024-11-18T18:50:10,657 INFO [Time-limited test {}] server.Server(415): Started @288046ms 2024-11-18T18:50:10,667 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T18:50:10,861 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:50:10,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:50:10,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:50:10,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:50:10,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T18:50:10,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d8eb6c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:50:10,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33816e95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:50:10,957 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@442cd635{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/java.io.tmpdir/jetty-localhost-39891-hadoop-hdfs-3_4_1-tests_jar-_-any-15339121415356412187/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:10,958 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d1d035a{HTTP/1.1, (http/1.1)}{localhost:39891} 2024-11-18T18:50:10,958 INFO [Time-limited test {}] server.Server(415): Started @288347ms 2024-11-18T18:50:10,959 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:50:10,998 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T18:50:11,000 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T18:50:11,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T18:50:11,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T18:50:11,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T18:50:11,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c447438{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,AVAILABLE} 2024-11-18T18:50:11,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c071501{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T18:50:11,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@607ad43a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/java.io.tmpdir/jetty-localhost-33983-hadoop-hdfs-3_4_1-tests_jar-_-any-15699636301268431975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:11,099 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4420c496{HTTP/1.1, (http/1.1)}{localhost:33983} 2024-11-18T18:50:11,099 INFO [Time-limited test {}] server.Server(415): Started @288488ms 2024-11-18T18:50:11,100 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T18:50:11,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:11,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:50:11,856 WARN [Thread-2508 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data1/current/BP-924774165-172.17.0.2-1731955810261/current, will proceed with Du for space computation calculation, 2024-11-18T18:50:11,857 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data2/current/BP-924774165-172.17.0.2-1731955810261/current, will proceed with Du for space computation calculation, 2024-11-18T18:50:11,875 WARN [Thread-2472 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:50:11,877 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4c5adc35b04ef21 with lease ID 0xc838892d7e93a8c8: Processing first storage report for DS-9042c19d-1a14-4a3b-b376-9bcb1cca9dde from datanode DatanodeRegistration(127.0.0.1:45329, datanodeUuid=797b505a-0ba9-4fae-8691-e7a02dbdfdbc, infoPort=42137, infoSecurePort=0, ipcPort=40139, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261) 2024-11-18T18:50:11,877 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4c5adc35b04ef21 with lease ID 0xc838892d7e93a8c8: from storage DS-9042c19d-1a14-4a3b-b376-9bcb1cca9dde node DatanodeRegistration(127.0.0.1:45329, datanodeUuid=797b505a-0ba9-4fae-8691-e7a02dbdfdbc, infoPort=42137, infoSecurePort=0, ipcPort=40139, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:50:11,877 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf4c5adc35b04ef21 with lease ID 0xc838892d7e93a8c8: Processing first storage report for DS-14d1174f-0106-4325-ab21-f826a4b1f67e from datanode DatanodeRegistration(127.0.0.1:45329, datanodeUuid=797b505a-0ba9-4fae-8691-e7a02dbdfdbc, infoPort=42137, infoSecurePort=0, ipcPort=40139, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261) 2024-11-18T18:50:11,877 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf4c5adc35b04ef21 with lease ID 0xc838892d7e93a8c8: from storage DS-14d1174f-0106-4325-ab21-f826a4b1f67e node DatanodeRegistration(127.0.0.1:45329, datanodeUuid=797b505a-0ba9-4fae-8691-e7a02dbdfdbc, infoPort=42137, infoSecurePort=0, ipcPort=40139, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:50:12,010 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data3/current/BP-924774165-172.17.0.2-1731955810261/current, will proceed with Du for space computation calculation, 2024-11-18T18:50:12,010 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data4/current/BP-924774165-172.17.0.2-1731955810261/current, will proceed with Du for space computation calculation, 2024-11-18T18:50:12,025 WARN [Thread-2495 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T18:50:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e26ef472fe36435 with lease ID 0xc838892d7e93a8c9: Processing first storage report for DS-05201fbd-4e0a-4fca-b230-c33b71065c8e from datanode DatanodeRegistration(127.0.0.1:39017, datanodeUuid=ef311c7b-1339-47b9-aba2-bef66aa91996, infoPort=38145, infoSecurePort=0, ipcPort=36665, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261) 2024-11-18T18:50:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e26ef472fe36435 with lease ID 0xc838892d7e93a8c9: from storage DS-05201fbd-4e0a-4fca-b230-c33b71065c8e node DatanodeRegistration(127.0.0.1:39017, datanodeUuid=ef311c7b-1339-47b9-aba2-bef66aa91996, infoPort=38145, infoSecurePort=0, ipcPort=36665, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:50:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e26ef472fe36435 with lease ID 0xc838892d7e93a8c9: Processing first storage report for DS-53e0664b-020b-4374-9a35-aba37fd45a6b from datanode DatanodeRegistration(127.0.0.1:39017, datanodeUuid=ef311c7b-1339-47b9-aba2-bef66aa91996, infoPort=38145, infoSecurePort=0, ipcPort=36665, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261) 2024-11-18T18:50:12,027 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e26ef472fe36435 with lease ID 0xc838892d7e93a8c9: from storage DS-53e0664b-020b-4374-9a35-aba37fd45a6b node DatanodeRegistration(127.0.0.1:39017, datanodeUuid=ef311c7b-1339-47b9-aba2-bef66aa91996, infoPort=38145, infoSecurePort=0, ipcPort=36665, storageInfo=lv=-57;cid=testClusterID;nsid=88976584;c=1731955810261), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T18:50:12,131 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00 2024-11-18T18:50:12,136 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/zookeeper_0, clientPort=64585, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T18:50:12,137 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64585 2024-11-18T18:50:12,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:50:12,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741825_1001 (size=7) 2024-11-18T18:50:12,147 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d with version=8 2024-11-18T18:50:12,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35907/user/jenkins/test-data/d17a6849-37f8-264b-7725-dd93f739adb9/hbase-staging 2024-11-18T18:50:12,150 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T18:50:12,150 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:50:12,151 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45039 2024-11-18T18:50:12,152 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45039 connecting to ZooKeeper ensemble=127.0.0.1:64585 2024-11-18T18:50:12,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450390x0, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T18:50:12,232 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45039-0x101509219100000 connected 2024-11-18T18:50:12,305 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,309 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:12,315 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d, hbase.cluster.distributed=false 2024-11-18T18:50:12,318 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:50:12,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45039 2024-11-18T18:50:12,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45039 2024-11-18T18:50:12,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45039 2024-11-18T18:50:12,319 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45039 2024-11-18T18:50:12,320 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45039 2024-11-18T18:50:12,334 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39fff3b0f89c:0 server-side Connection retries=45 2024-11-18T18:50:12,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,334 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T18:50:12,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T18:50:12,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T18:50:12,335 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T18:50:12,335 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T18:50:12,335 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33481 2024-11-18T18:50:12,336 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33481 connecting to ZooKeeper ensemble=127.0.0.1:64585 2024-11-18T18:50:12,337 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,338 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:12,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334810x0, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T18:50:12,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334810x0, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:12,352 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33481-0x101509219100001 connected 2024-11-18T18:50:12,352 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T18:50:12,353 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T18:50:12,353 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T18:50:12,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T18:50:12,355 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33481 2024-11-18T18:50:12,355 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33481 2024-11-18T18:50:12,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33481 2024-11-18T18:50:12,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33481 2024-11-18T18:50:12,358 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33481 2024-11-18T18:50:12,368 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39fff3b0f89c:45039 2024-11-18T18:50:12,368 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:50:12,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:50:12,377 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T18:50:12,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,385 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T18:50:12,385 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39fff3b0f89c,45039,1731955812150 from backup master directory 2024-11-18T18:50:12,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:50:12,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T18:50:12,393 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
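The ZooKeeper traffic above (an ephemeral znode created under /hbase/backup-masters, NodeChildrenChanged/NodeCreated watch events delivered to the peers, then deletion of the backup entry once the master is promoted) is the standard ZooKeeper registration pattern. What follows is a minimal, illustrative Java sketch of that pattern using the stock org.apache.zookeeper client rather than HBase's ZKWatcher/ZKUtil/ActiveMasterManager classes; the quorum address (127.0.0.1:64585), znode paths, and server name are copied from the log, while the class name and control flow are invented purely for illustration.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

// Illustrative sketch only: ephemeral registration under /hbase/backup-masters,
// mirroring the ZKWatcher events logged above. Not HBase code.
public class BackupMasterZNodeSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64585", 30000, event -> {
            // Counterpart of the "Received ZooKeeper Event, type=None, state=SyncConnected" lines.
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        String parent = "/hbase/backup-masters";
        if (zk.exists(parent, false) == null) {
            zk.create(parent, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }

        // Ephemeral child: it disappears automatically if this session dies,
        // and is deleted explicitly when the master is promoted to active.
        String me = zk.create(parent + "/39fff3b0f89c,45039,1731955812150",
                new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // Watching the parent gives peers a NodeChildrenChanged event like those logged above.
        zk.getChildren(parent, event -> System.out.println("children changed: " + event));

        // Promotion: remove the backup entry again (version -1 = any version).
        zk.delete(me, -1);
        zk.close();
    }
}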
2024-11-18T18:50:12,393 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39fff3b0f89c,45039,1731955812150
2024-11-18T18:50:12,397 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/hbase.id] with ID: ea11b9ce-26fb-4072-8149-dc4784aefa1a
2024-11-18T18:50:12,397 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/.tmp/hbase.id
2024-11-18T18:50:12,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741826_1002 (size=42)
2024-11-18T18:50:12,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741826_1002 (size=42)
2024-11-18T18:50:12,403 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/.tmp/hbase.id]:[hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/hbase.id]
2024-11-18T18:50:12,411 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-18T18:50:12,411 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-18T18:50:12,413 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
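The FSUtils lines above show the cluster ID being published with a write-to-temp-then-rename pattern (write hbase.id under .tmp, then move it into place) so that readers never observe a partially written file. The sketch below reproduces just that pattern with the plain Hadoop FileSystem API; it is not HBase's FSUtils code and, for simplicity, it writes the ID as raw UTF-8 text rather than HBase's actual serialization. The namenode URI, paths, and cluster ID are taken from the log; the class name is invented.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: publish a small marker file via temp-write + rename.
public class ClusterIdFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Namenode address copied from the log above.
        conf.set("fs.defaultFS", "hdfs://localhost:46003");
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the content to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("ea11b9ce-26fb-4072-8149-dc4784aefa1a".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then move it into place, so readers never see a half-written file.
        if (!fs.rename(tmp, target)) {
            throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
        fs.close();
    }
}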
2024-11-18T18:50:12,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:50:12,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741827_1003 (size=196) 2024-11-18T18:50:12,428 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T18:50:12,429 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T18:50:12,430 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:50:12,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:50:12,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741828_1004 (size=1189) 2024-11-18T18:50:12,439 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store 2024-11-18T18:50:12,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:50:12,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741829_1005 (size=34) 2024-11-18T18:50:12,446 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:50:12,446 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:50:12,446 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:12,446 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:12,446 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:50:12,446 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:12,446 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
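The descriptor dumped above for the local 'master:store' region lists four column families (info, proc, rs, state) with the version counts, block sizes, encodings, and bloom filters shown. As a rough reconstruction, and not the code MasterRegion actually uses internally, an equivalent table descriptor could be assembled with the public HBase client API as in the sketch below; the table-level 'hbase.store.file-tracker.impl' metadata attribute and the TTL/KEEP_DELETED_CELLS defaults are omitted, and the class and helper names are invented for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch only: a descriptor equivalent to the 'master:store' layout logged above.
public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL blooms
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // 'proc', 'rs', 'state': single version, 64 KB blocks, ROW blooms, no encoding
            .setColumnFamily(defaultFamily("proc"))
            .setColumnFamily(defaultFamily("rs"))
            .setColumnFamily(defaultFamily("state"))
            .build();
    }

    private static ColumnFamilyDescriptor defaultFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .setBlocksize(64 * 1024)
            .setBloomFilterType(BloomType.ROW)
            .build();
    }

    public static void main(String[] args) {
        System.out.println(build());
    }
}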
2024-11-18T18:50:12,447 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955812446Disabling compacts and flushes for region at 1731955812446Disabling writes for close at 1731955812446Writing region close event to WAL at 1731955812446Closed at 1731955812446 2024-11-18T18:50:12,448 WARN [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/.initializing 2024-11-18T18:50:12,448 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/WALs/39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,451 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C45039%2C1731955812150, suffix=, logDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/WALs/39fff3b0f89c,45039,1731955812150, archiveDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/oldWALs, maxLogs=10 2024-11-18T18:50:12,451 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C45039%2C1731955812150.1731955812451 2024-11-18T18:50:12,458 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/WALs/39fff3b0f89c,45039,1731955812150/39fff3b0f89c%2C45039%2C1731955812150.1731955812451 2024-11-18T18:50:12,459 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42137:42137),(127.0.0.1/127.0.0.1:38145:38145)] 2024-11-18T18:50:12,460 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:50:12,460 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:50:12,460 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,460 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T18:50:12,464 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:12,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,465 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T18:50:12,466 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:50:12,466 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T18:50:12,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:50:12,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T18:50:12,469 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T18:50:12,470 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,471 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,471 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,473 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,473 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,474 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T18:50:12,475 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T18:50:12,478 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:50:12,478 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688865, jitterRate=-0.1240634024143219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T18:50:12,479 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731955812460Initializing all the Stores at 1731955812461 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955812461Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955812462 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955812462Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955812462Cleaning up temporary data from old regions at 1731955812473 (+11 ms)Region opened successfully at 1731955812479 (+6 ms) 2024-11-18T18:50:12,480 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T18:50:12,483 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49d2d8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:50:12,484 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T18:50:12,484 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T18:50:12,484 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T18:50:12,484 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T18:50:12,485 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T18:50:12,485 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T18:50:12,485 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T18:50:12,488 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T18:50:12,489 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T18:50:12,496 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T18:50:12,496 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T18:50:12,497 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T18:50:12,505 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T18:50:12,505 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T18:50:12,506 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T18:50:12,513 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T18:50:12,515 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T18:50:12,514 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:12,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:12,521 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T18:50:12,525 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T18:50:12,535 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T18:50:12,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:12,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:12,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,544 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39fff3b0f89c,45039,1731955812150, sessionid=0x101509219100000, setting cluster-up flag (Was=false) 2024-11-18T18:50:12,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,585 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] 
procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T18:50:12,586 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:12,630 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T18:50:12,634 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:12,636 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T18:50:12,639 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T18:50:12,640 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T18:50:12,640 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-18T18:50:12,640 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39fff3b0f89c,45039,1731955812150 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T18:50:12,644 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:50:12,644 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:50:12,644 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:50:12,644 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=5, maxPoolSize=5 2024-11-18T18:50:12,644 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39fff3b0f89c:0, corePoolSize=10, maxPoolSize=10 2024-11-18T18:50:12,645 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,645 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:50:12,645 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731955842646 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T18:50:12,646 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,647 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:50:12,647 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T18:50:12,647 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955812647,5,FailOnTimeoutGroup] 2024-11-18T18:50:12,647 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955812647,5,FailOnTimeoutGroup] 2024-11-18T18:50:12,647 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,648 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T18:50:12,648 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,648 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,648 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T18:50:12,648 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T18:50:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:50:12,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741831_1007 (size=1321) 2024-11-18T18:50:12,653 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T18:50:12,653 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d 2024-11-18T18:50:12,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:50:12,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741832_1008 (size=32) 2024-11-18T18:50:12,660 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(746): ClusterId : ea11b9ce-26fb-4072-8149-dc4784aefa1a 2024-11-18T18:50:12,660 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:50:12,660 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T18:50:12,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:50:12,663 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:50:12,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:12,663 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:50:12,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:50:12,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:12,665 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:50:12,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:50:12,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:12,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:50:12,668 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-11-18T18:50:12,668 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:12,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:12,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:50:12,669 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T18:50:12,669 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T18:50:12,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740 2024-11-18T18:50:12,669 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740 2024-11-18T18:50:12,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:50:12,671 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:50:12,672 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T18:50:12,673 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:50:12,675 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T18:50:12,675 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713656, jitterRate=-0.09253951907157898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:50:12,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731955812660Initializing all the Stores at 1731955812661 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955812661Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955812661Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955812661Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955812661Cleaning up temporary data from old regions at 1731955812671 (+10 ms)Region opened successfully at 1731955812676 (+5 ms) 2024-11-18T18:50:12,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:50:12,676 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:50:12,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:50:12,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:50:12,676 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:50:12,677 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:50:12,677 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955812676Disabling compacts and flushes for region at 1731955812676Disabling writes for close at 1731955812676Writing region 
close event to WAL at 1731955812677 (+1 ms)Closed at 1731955812677 2024-11-18T18:50:12,677 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T18:50:12,677 DEBUG [RS:0;39fff3b0f89c:33481 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e51d9a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39fff3b0f89c/172.17.0.2:0 2024-11-18T18:50:12,678 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:50:12,678 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T18:50:12,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T18:50:12,679 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:50:12,680 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T18:50:12,688 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39fff3b0f89c:33481 2024-11-18T18:50:12,688 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T18:50:12,688 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T18:50:12,688 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T18:50:12,689 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(2659): reportForDuty to master=39fff3b0f89c,45039,1731955812150 with port=33481, startcode=1731955812334 2024-11-18T18:50:12,689 DEBUG [RS:0;39fff3b0f89c:33481 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T18:50:12,691 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57927, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T18:50:12,691 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45039 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,691 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45039 {}] master.ServerManager(517): Registering regionserver=39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,692 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d 2024-11-18T18:50:12,692 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46003 2024-11-18T18:50:12,692 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T18:50:12,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:50:12,702 DEBUG [RS:0;39fff3b0f89c:33481 {}] zookeeper.ZKUtil(111): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,702 WARN [RS:0;39fff3b0f89c:33481 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T18:50:12,702 INFO [RS:0;39fff3b0f89c:33481 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:50:12,702 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,702 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39fff3b0f89c,33481,1731955812334] 2024-11-18T18:50:12,705 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T18:50:12,707 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T18:50:12,707 INFO [RS:0;39fff3b0f89c:33481 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T18:50:12,707 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T18:50:12,708 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T18:50:12,708 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T18:50:12,708 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39fff3b0f89c:0, corePoolSize=2, maxPoolSize=2 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39fff3b0f89c:0, corePoolSize=1, maxPoolSize=1 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:50:12,709 DEBUG [RS:0;39fff3b0f89c:33481 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39fff3b0f89c:0, corePoolSize=3, maxPoolSize=3 2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,711 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,33481,1731955812334-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:50:12,724 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T18:50:12,724 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,33481,1731955812334-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,724 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,724 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.Replication(171): 39fff3b0f89c,33481,1731955812334 started 2024-11-18T18:50:12,735 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:12,735 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1482): Serving as 39fff3b0f89c,33481,1731955812334, RpcServer on 39fff3b0f89c/172.17.0.2:33481, sessionid=0x101509219100001 2024-11-18T18:50:12,736 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T18:50:12,736 DEBUG [RS:0;39fff3b0f89c:33481 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,736 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,33481,1731955812334' 2024-11-18T18:50:12,736 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T18:50:12,736 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39fff3b0f89c,33481,1731955812334' 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T18:50:12,737 DEBUG 
[RS:0;39fff3b0f89c:33481 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T18:50:12,737 DEBUG [RS:0;39fff3b0f89c:33481 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T18:50:12,737 INFO [RS:0;39fff3b0f89c:33481 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T18:50:12,737 INFO [RS:0;39fff3b0f89c:33481 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T18:50:12,830 WARN [39fff3b0f89c:45039 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-18T18:50:12,839 INFO [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C33481%2C1731955812334, suffix=, logDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/39fff3b0f89c,33481,1731955812334, archiveDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs, maxLogs=32 2024-11-18T18:50:12,840 INFO [RS:0;39fff3b0f89c:33481 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C33481%2C1731955812334.1731955812840 2024-11-18T18:50:12,844 INFO [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/39fff3b0f89c,33481,1731955812334/39fff3b0f89c%2C33481%2C1731955812334.1731955812840 2024-11-18T18:50:12,851 DEBUG [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:42137:42137)] 2024-11-18T18:50:13,080 DEBUG [39fff3b0f89c:45039 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T18:50:13,081 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:13,082 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,33481,1731955812334, state=OPENING 2024-11-18T18:50:13,118 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T18:50:13,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:13,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:13,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:50:13,127 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:50:13,127 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T18:50:13,127 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,33481,1731955812334}] 2024-11-18T18:50:13,280 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T18:50:13,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51247, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T18:50:13,289 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T18:50:13,289 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:50:13,292 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39fff3b0f89c%2C33481%2C1731955812334.meta, suffix=.meta, logDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/39fff3b0f89c,33481,1731955812334, archiveDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs, maxLogs=32 2024-11-18T18:50:13,292 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39fff3b0f89c%2C33481%2C1731955812334.meta.1731955813292.meta 2024-11-18T18:50:13,298 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/39fff3b0f89c,33481,1731955812334/39fff3b0f89c%2C33481%2C1731955812334.meta.1731955813292.meta 2024-11-18T18:50:13,303 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:42137:42137)] 2024-11-18T18:50:13,307 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T18:50:13,308 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T18:50:13,308 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T18:50:13,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T18:50:13,310 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T18:50:13,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:13,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:13,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T18:50:13,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T18:50:13,312 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:13,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:13,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T18:50:13,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T18:50:13,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:13,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T18:50:13,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T18:50:13,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T18:50:13,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T18:50:13,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T18:50:13,314 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T18:50:13,315 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740 2024-11-18T18:50:13,316 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740 2024-11-18T18:50:13,317 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T18:50:13,317 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T18:50:13,317 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T18:50:13,318 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T18:50:13,319 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741195, jitterRate=-0.05752307176589966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T18:50:13,319 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T18:50:13,320 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731955813308Writing region info on filesystem at 1731955813308Initializing all the Stores at 1731955813309 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955813309Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955813309Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731955813309Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731955813309Cleaning up temporary data from old regions at 1731955813317 (+8 ms)Running coprocessor post-open hooks at 1731955813319 (+2 ms)Region opened successfully at 1731955813319 2024-11-18T18:50:13,320 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731955813280 2024-11-18T18:50:13,322 DEBUG [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T18:50:13,323 INFO [RS_OPEN_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T18:50:13,323 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:13,324 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39fff3b0f89c,33481,1731955812334, state=OPEN 2024-11-18T18:50:13,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:50:13,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T18:50:13,355 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:13,355 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:50:13,355 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T18:50:13,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T18:50:13,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39fff3b0f89c,33481,1731955812334 in 228 msec 2024-11-18T18:50:13,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T18:50:13,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 680 msec 2024-11-18T18:50:13,362 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T18:50:13,362 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T18:50:13,363 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:50:13,363 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,33481,1731955812334, seqNum=-1] 2024-11-18T18:50:13,364 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:50:13,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35701, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:50:13,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 732 msec 2024-11-18T18:50:13,370 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731955813370, completionTime=-1 2024-11-18T18:50:13,370 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T18:50:13,370 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731955873372 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731955933372 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:13,372 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:13,373 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:13,373 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39fff3b0f89c:45039, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:50:13,373 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:13,373 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T18:50:13,374 DEBUG [master/39fff3b0f89c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.983sec 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T18:50:13,376 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T18:50:13,379 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T18:50:13,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T18:50:13,379 INFO [master/39fff3b0f89c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39fff3b0f89c,45039,1731955812150-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T18:50:13,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4125913c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:50:13,460 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39fff3b0f89c,45039,-1 for getting cluster id 2024-11-18T18:50:13,460 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T18:50:13,462 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ea11b9ce-26fb-4072-8149-dc4784aefa1a' 2024-11-18T18:50:13,462 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T18:50:13,462 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ea11b9ce-26fb-4072-8149-dc4784aefa1a" 2024-11-18T18:50:13,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6251961, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:50:13,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39fff3b0f89c,45039,-1] 2024-11-18T18:50:13,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T18:50:13,463 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,465 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T18:50:13,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@603630ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T18:50:13,466 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T18:50:13,468 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39fff3b0f89c,33481,1731955812334, seqNum=-1] 2024-11-18T18:50:13,468 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T18:50:13,469 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39252, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T18:50:13,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:13,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T18:50:13,474 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T18:50:13,475 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T18:50:13,477 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs, maxLogs=32 2024-11-18T18:50:13,478 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731955813477 2024-11-18T18:50:13,485 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/test.com,8080,1/test.com%2C8080%2C1.1731955813477 2024-11-18T18:50:13,486 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:42137:42137)] 2024-11-18T18:50:13,487 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731955813487 2024-11-18T18:50:13,493 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,493 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,493 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,493 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,493 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,493 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/test.com,8080,1/test.com%2C8080%2C1.1731955813477 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/test.com,8080,1/test.com%2C8080%2C1.1731955813487 2024-11-18T18:50:13,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741835_1011 (size=93) 2024-11-18T18:50:13,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741835_1011 (size=93) 2024-11-18T18:50:13,499 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/WALs/test.com,8080,1/test.com%2C8080%2C1.1731955813477 to hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs/test.com%2C8080%2C1.1731955813477 2024-11-18T18:50:13,499 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38145:38145),(127.0.0.1/127.0.0.1:42137:42137)] 2024-11-18T18:50:13,500 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45329 is added to blk_1073741836_1012 (size=93) 2024-11-18T18:50:13,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741836_1012 (size=93) 2024-11-18T18:50:13,503 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs 2024-11-18T18:50:13,503 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731955813487) 2024-11-18T18:50:13,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T18:50:13,504 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T18:50:13,504 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:13,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,504 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T18:50:13,504 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T18:50:13,504 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1888771011, stopped=false 2024-11-18T18:50:13,504 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39fff3b0f89c,45039,1731955812150 2024-11-18T18:50:13,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,35551,1731955632182/39fff3b0f89c%2C35551%2C1731955632182.meta.1731955633205.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T18:50:13,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44763/user/jenkins/test-data/c9014b25-8ffe-b2fa-d3eb-4197173868f5/WALs/39fff3b0f89c,36311,1731955633450/39fff3b0f89c%2C36311%2C1731955633450.1731955633687 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T18:50:13,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:13,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T18:50:13,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:13,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:13,521 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:50:13,521 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T18:50:13,522 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:13,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,522 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:13,522 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T18:50:13,522 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39fff3b0f89c,33481,1731955812334' ***** 2024-11-18T18:50:13,522 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T18:50:13,522 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T18:50:13,522 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T18:50:13,522 INFO [RS:0;39fff3b0f89c:33481 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T18:50:13,522 INFO [RS:0;39fff3b0f89c:33481 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T18:50:13,522 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(959): stopping server 39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:13,522 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39fff3b0f89c:33481. 2024-11-18T18:50:13,523 DEBUG [RS:0;39fff3b0f89c:33481 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T18:50:13,523 DEBUG [RS:0;39fff3b0f89c:33481 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T18:50:13,523 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T18:50:13,523 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T18:50:13,523 DEBUG [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T18:50:13,523 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T18:50:13,523 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T18:50:13,523 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T18:50:13,523 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T18:50:13,523 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T18:50:13,523 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T18:50:13,537 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/.tmp/ns/06e2d03150ea485db07876f204a33003 is 43, key is default/ns:d/1731955813366/Put/seqid=0 2024-11-18T18:50:13,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741837_1013 (size=5153) 2024-11-18T18:50:13,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741837_1013 (size=5153) 2024-11-18T18:50:13,542 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/.tmp/ns/06e2d03150ea485db07876f204a33003 2024-11-18T18:50:13,547 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/.tmp/ns/06e2d03150ea485db07876f204a33003 as hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/ns/06e2d03150ea485db07876f204a33003 2024-11-18T18:50:13,552 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/ns/06e2d03150ea485db07876f204a33003, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T18:50:13,554 INFO 
[RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-18T18:50:13,558 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T18:50:13,558 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T18:50:13,558 INFO [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T18:50:13,558 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731955813523Running coprocessor pre-close hooks at 1731955813523Disabling compacts and flushes for region at 1731955813523Disabling writes for close at 1731955813523Obtaining lock to block concurrent updates at 1731955813523Preparing flush snapshotting stores in 1588230740 at 1731955813523Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731955813524 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731955813524Flushing 1588230740/ns: creating writer at 1731955813524Flushing 1588230740/ns: appending metadata at 1731955813537 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731955813537Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cf8c726: reopening flushed file at 1731955813547 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731955813554 (+7 ms)Writing region close event to WAL at 1731955813554Running coprocessor post-close hooks at 1731955813558 (+4 ms)Closed at 1731955813558 2024-11-18T18:50:13,558 DEBUG [RS_CLOSE_META-regionserver/39fff3b0f89c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T18:50:13,723 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(976): stopping server 39fff3b0f89c,33481,1731955812334; all regions closed. 
2024-11-18T18:50:13,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,724 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,725 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,725 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,725 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741834_1010 (size=1152) 2024-11-18T18:50:13,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741834_1010 (size=1152) 2024-11-18T18:50:13,733 DEBUG [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs 2024-11-18T18:50:13,733 INFO [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C33481%2C1731955812334.meta:.meta(num 1731955813292) 2024-11-18T18:50:13,734 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,734 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,735 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,735 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,735 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:13,736 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T18:50:13,736 INFO [regionserver/39fff3b0f89c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T18:50:13,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741833_1009 (size=93) 2024-11-18T18:50:13,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741833_1009 (size=93) 2024-11-18T18:50:13,742 DEBUG [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/oldWALs 2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39fff3b0f89c%2C33481%2C1731955812334:(num 1731955812840) 2024-11-18T18:50:13,742 DEBUG [RS:0;39fff3b0f89c:33481 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.ChoreService(370): Chore service for: regionserver/39fff3b0f89c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:50:13,742 INFO [regionserver/39fff3b0f89c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T18:50:13,742 INFO [RS:0;39fff3b0f89c:33481 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33481 2024-11-18T18:50:13,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39fff3b0f89c,33481,1731955812334 2024-11-18T18:50:13,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T18:50:13,751 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:50:13,760 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39fff3b0f89c,33481,1731955812334] 2024-11-18T18:50:13,768 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39fff3b0f89c,33481,1731955812334 already deleted, retry=false 2024-11-18T18:50:13,768 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39fff3b0f89c,33481,1731955812334 expired; onlineServers=0 2024-11-18T18:50:13,768 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39fff3b0f89c,45039,1731955812150' ***** 2024-11-18T18:50:13,768 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T18:50:13,768 INFO [M:0;39fff3b0f89c:45039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T18:50:13,768 INFO [M:0;39fff3b0f89c:45039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T18:50:13,768 DEBUG [M:0;39fff3b0f89c:45039 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T18:50:13,769 DEBUG [M:0;39fff3b0f89c:45039 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T18:50:13,769 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T18:50:13,769 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955812647 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.large.0-1731955812647,5,FailOnTimeoutGroup] 2024-11-18T18:50:13,769 DEBUG [master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955812647 {}] cleaner.HFileCleaner(306): Exit Thread[master/39fff3b0f89c:0:becomeActiveMaster-HFileCleaner.small.0-1731955812647,5,FailOnTimeoutGroup] 2024-11-18T18:50:13,769 INFO [M:0;39fff3b0f89c:45039 {}] hbase.ChoreService(370): Chore service for: master/39fff3b0f89c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T18:50:13,769 INFO [M:0;39fff3b0f89c:45039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T18:50:13,769 DEBUG [M:0;39fff3b0f89c:45039 {}] master.HMaster(1795): Stopping service threads 2024-11-18T18:50:13,769 INFO [M:0;39fff3b0f89c:45039 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T18:50:13,769 INFO [M:0;39fff3b0f89c:45039 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T18:50:13,770 INFO [M:0;39fff3b0f89c:45039 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T18:50:13,770 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T18:50:13,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T18:50:13,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T18:50:13,780 DEBUG [M:0;39fff3b0f89c:45039 {}] zookeeper.ZKUtil(347): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T18:50:13,780 WARN [M:0;39fff3b0f89c:45039 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T18:50:13,781 INFO [M:0;39fff3b0f89c:45039 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/.lastflushedseqids 2024-11-18T18:50:13,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741838_1014 (size=99) 2024-11-18T18:50:13,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741838_1014 (size=99) 2024-11-18T18:50:13,792 INFO [M:0;39fff3b0f89c:45039 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T18:50:13,792 INFO [M:0;39fff3b0f89c:45039 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T18:50:13,792 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T18:50:13,792 INFO [M:0;39fff3b0f89c:45039 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:13,793 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:13,793 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T18:50:13,793 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:13,793 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T18:50:13,810 DEBUG [M:0;39fff3b0f89c:45039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2932a1810f74ad09b8db616e08c5592 is 82, key is hbase:meta,,1/info:regioninfo/1731955813323/Put/seqid=0 2024-11-18T18:50:13,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741839_1015 (size=5672) 2024-11-18T18:50:13,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741839_1015 (size=5672) 2024-11-18T18:50:13,815 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2932a1810f74ad09b8db616e08c5592 2024-11-18T18:50:13,831 DEBUG [M:0;39fff3b0f89c:45039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e9222a15f774772a7945cf5836101fb is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731955813370/Put/seqid=0 2024-11-18T18:50:13,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741840_1016 (size=5275) 2024-11-18T18:50:13,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741840_1016 (size=5275) 2024-11-18T18:50:13,836 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e9222a15f774772a7945cf5836101fb 2024-11-18T18:50:13,852 DEBUG [M:0;39fff3b0f89c:45039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c994c638176142ad9e08b4d99bba858e is 69, key is 39fff3b0f89c,33481,1731955812334/rs:state/1731955812691/Put/seqid=0 2024-11-18T18:50:13,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741841_1017 (size=5156) 2024-11-18T18:50:13,856 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741841_1017 (size=5156) 2024-11-18T18:50:13,857 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c994c638176142ad9e08b4d99bba858e 2024-11-18T18:50:13,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:13,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33481-0x101509219100001, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:13,860 INFO [RS:0;39fff3b0f89c:33481 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:50:13,860 INFO [RS:0;39fff3b0f89c:33481 {}] regionserver.HRegionServer(1031): Exiting; stopping=39fff3b0f89c,33481,1731955812334; zookeeper connection closed. 2024-11-18T18:50:13,860 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41e902aa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41e902aa 2024-11-18T18:50:13,861 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T18:50:13,873 DEBUG [M:0;39fff3b0f89c:45039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/817d05e23fb24006ab473a7e92588e5b is 52, key is load_balancer_on/state:d/1731955813473/Put/seqid=0 2024-11-18T18:50:13,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741842_1018 (size=5056) 2024-11-18T18:50:13,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741842_1018 (size=5056) 2024-11-18T18:50:14,280 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/817d05e23fb24006ab473a7e92588e5b 2024-11-18T18:50:14,291 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d2932a1810f74ad09b8db616e08c5592 as hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2932a1810f74ad09b8db616e08c5592 2024-11-18T18:50:14,296 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d2932a1810f74ad09b8db616e08c5592, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T18:50:14,297 DEBUG [M:0;39fff3b0f89c:45039 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3e9222a15f774772a7945cf5836101fb as hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3e9222a15f774772a7945cf5836101fb 2024-11-18T18:50:14,301 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3e9222a15f774772a7945cf5836101fb, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T18:50:14,302 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c994c638176142ad9e08b4d99bba858e as hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c994c638176142ad9e08b4d99bba858e 2024-11-18T18:50:14,306 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c994c638176142ad9e08b4d99bba858e, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T18:50:14,307 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/817d05e23fb24006ab473a7e92588e5b as hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/817d05e23fb24006ab473a7e92588e5b 2024-11-18T18:50:14,311 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46003/user/jenkins/test-data/e16094d8-57e9-c73f-fdb7-548c6e0fb63d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/817d05e23fb24006ab473a7e92588e5b, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T18:50:14,312 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 519ms, sequenceid=29, compaction requested=false 2024-11-18T18:50:14,314 INFO [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T18:50:14,314 DEBUG [M:0;39fff3b0f89c:45039 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731955813792Disabling compacts and flushes for region at 1731955813792Disabling writes for close at 1731955813793 (+1 ms)Obtaining lock to block concurrent updates at 1731955813793Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731955813793Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731955813794 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731955813795 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731955813795Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731955813810 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731955813810Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731955813818 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731955813831 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731955813831Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731955813839 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731955813851 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731955813851Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731955813860 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731955813873 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731955813873Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17dc0043: reopening flushed file at 1731955814290 (+417 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29c07097: reopening flushed file at 1731955814296 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30bda71a: reopening flushed file at 1731955814301 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3395d5bc: reopening flushed file at 1731955814306 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 519ms, sequenceid=29, compaction requested=false at 1731955814312 (+6 ms)Writing region close event to WAL at 1731955814314 (+2 ms)Closed at 1731955814314 2024-11-18T18:50:14,314 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:14,315 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:14,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:14,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:14,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T18:50:14,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39017 is added to blk_1073741830_1006 (size=10311) 2024-11-18T18:50:14,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45329 is added to blk_1073741830_1006 (size=10311) 2024-11-18T18:50:14,318 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T18:50:14,318 INFO [M:0;39fff3b0f89c:45039 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T18:50:14,318 INFO [M:0;39fff3b0f89c:45039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45039 2024-11-18T18:50:14,318 INFO [M:0;39fff3b0f89c:45039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T18:50:14,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:14,452 INFO [M:0;39fff3b0f89c:45039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T18:50:14,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45039-0x101509219100000, quorum=127.0.0.1:64585, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T18:50:14,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@607ad43a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:14,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4420c496{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:14,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:14,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c071501{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:14,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c447438{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:14,459 WARN [BP-924774165-172.17.0.2-1731955810261 heartbeating to localhost/127.0.0.1:46003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:50:14,459 WARN [BP-924774165-172.17.0.2-1731955810261 heartbeating to localhost/127.0.0.1:46003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-924774165-172.17.0.2-1731955810261 (Datanode Uuid ef311c7b-1339-47b9-aba2-bef66aa91996) service to localhost/127.0.0.1:46003 2024-11-18T18:50:14,459 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:50:14,459 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:50:14,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data3/current/BP-924774165-172.17.0.2-1731955810261 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:14,461 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data4/current/BP-924774165-172.17.0.2-1731955810261 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:14,461 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:50:14,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@442cd635{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T18:50:14,463 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d1d035a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:14,463 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:14,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33816e95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:14,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d8eb6c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:14,464 WARN [BP-924774165-172.17.0.2-1731955810261 heartbeating to localhost/127.0.0.1:46003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T18:50:14,464 WARN [BP-924774165-172.17.0.2-1731955810261 heartbeating to localhost/127.0.0.1:46003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-924774165-172.17.0.2-1731955810261 (Datanode Uuid 797b505a-0ba9-4fae-8691-e7a02dbdfdbc) service to localhost/127.0.0.1:46003 2024-11-18T18:50:14,464 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T18:50:14,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T18:50:14,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data1/current/BP-924774165-172.17.0.2-1731955810261 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:14,465 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/cluster_bfb329d5-fedf-0421-28a1-f59cd0f46984/data/data2/current/BP-924774165-172.17.0.2-1731955810261 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T18:50:14,465 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T18:50:14,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b05d1eb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T18:50:14,470 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a377f89{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T18:50:14,470 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T18:50:14,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b907417{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T18:50:14,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c18422f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a81e4bdd-99a8-724e-ca8b-8a7b68b61d00/hadoop.log.dir/,STOPPED} 2024-11-18T18:50:14,475 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T18:50:14,491 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T18:50:14,498 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (874278612) connection to localhost/127.0.0.1:46003 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46003 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46003 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=144 (was 131) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4661 (was 4667)